author    Derek Lamberti <derek.lamberti@arm.com>  2020-01-10 17:28:20 +0000
committer Kevin May <kevin.may@arm.com>            2020-01-13 13:41:52 +0000
commit    1a38cdaefc5e53ba6bbaba54651dda459b81eafe (patch)
tree      0a3b7163cf5bbb783945e5deed96828cdd5b21db
parent    7b8d2e65c129263e9cdbdc82e5f73dd4d263aafb (diff)
Rename quantized data types
!armnn:2571

Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Change-Id: I06977553a097479a2a996e76a106249673d31ed7
 ConversionUtils.hpp |  2 +-
 Utils.cpp           | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)
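
For context: this patch adopts ArmNN's newer data-type naming scheme, which spells out signedness and bit width. The renames visible in the hunks below, as a comment sketch (not an exhaustive list of the scheme):

    // Old ArmNN enumerator                New enumerator
    // armnn::DataType::QuantisedAsymm8 -> armnn::DataType::QAsymmU8   (asymmetric, unsigned, 8-bit)
    // armnn::DataType::QuantisedSymm16 -> armnn::DataType::QSymmS16   (symmetric,  signed, 16-bit)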
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index b53432ca..a0ab9e5a 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -2961,7 +2961,7 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
 // value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
 // (QuantizationOffset - QuantizationOffset) * scale = 0.
- if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+ if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
{
descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
}
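
To see why padding with the QuantizationOffset gives a logical zero, here is a minimal standalone sketch of the affine (de)quantization arithmetic the comment relies on; the scale and offset values are illustrative, not taken from the patch:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Affine quantization: real = (q - offset) * scale
        const float   scale  = 0.5f;  // illustrative values only
        const int32_t offset = 128;

        // Pad with q == offset, as ConvertPad now does for QAsymmU8...
        const uint8_t padValue = static_cast<uint8_t>(offset);

        // ...so the dequantized pad value collapses to zero.
        const float real = (static_cast<int32_t>(padValue) - offset) * scale;
        assert(real == 0.0f); // (offset - offset) * scale == 0
        return 0;
    }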
diff --git a/Utils.cpp b/Utils.cpp
index 0211e92a..cdebfaed 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -43,7 +43,7 @@ void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void
{
case armnn::DataType::Float16:
case armnn::DataType::Float32:
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::QuantizedSymm8PerAxis:
SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
break;
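
Float16, Float32, QAsymmU8 and QuantizedSymm8PerAxis can share one case body because the swizzle is a pure byte-level permutation: the data type only enters through armnn::GetDataTypeSize(dataType), which is also why the rename only touches a case label. A minimal sketch of that idea, with a hypothetical helper simplified to a flat gather (not the driver's actual implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy 'count' elements of 'elementSize' bytes each, taking destination
    // element i from source element srcIndices[i].
    void GatherElements(const void* src, void* dst, std::size_t elementSize,
                        const std::size_t* srcIndices, std::size_t count)
    {
        const auto* s = static_cast<const std::uint8_t*>(src);
        auto*       d = static_cast<std::uint8_t*>(dst);
        for (std::size_t i = 0; i < count; ++i)
        {
            // Only the element's size matters here, never its type.
            std::memcpy(d + i * elementSize, s + srcIndices[i] * elementSize, elementSize);
        }
    }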
@@ -83,7 +83,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
type = armnn::DataType::Float32;
break;
case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case V1_0::OperandType::TENSOR_INT32:
type = armnn::DataType::Signed32;
@@ -119,13 +119,13 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
type = armnn::DataType::QuantizedSymm8PerAxis;
break;
case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case V1_2::OperandType::TENSOR_QUANT8_SYMM:
type = armnn::DataType::QSymmS8;
break;
case V1_2::OperandType::TENSOR_QUANT16_SYMM:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case V1_2::OperandType::TENSOR_INT32:
type = armnn::DataType::Signed32;
@@ -228,7 +228,7 @@ void DumpTensor(const std::string& dumpDir,
dumpElementFunction = &DumpTensorElement<float>;
break;
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
break;
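
The second template argument in DumpTensorElement<uint8_t, uint32_t> is presumably a print type: streaming a uint8_t through operator<< emits a character, so quantized bytes are widened to an integer first. A minimal sketch of that pattern with an assumed signature (not the driver's actual code):

    #include <cstdint>
    #include <iostream>

    template <typename ElementType, typename PrintType = ElementType>
    void DumpTensorElement(std::ostream& os, const void* data, unsigned int index)
    {
        const ElementType* typed = static_cast<const ElementType*>(data);
        // Widen before streaming so uint8_t prints as a number, not a char.
        os << static_cast<PrintType>(typed[index]) << " ";
    }

    // Usage: DumpTensorElement<std::uint8_t, std::uint32_t>(std::cout, buffer, 0);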