path: root/Utils.cpp
author     Derek Lamberti <derek.lamberti@arm.com>  2020-01-10 17:28:20 +0000
committer  Kevin May <kevin.may@arm.com>            2020-01-13 13:41:52 +0000
commit     1a38cdaefc5e53ba6bbaba54651dda459b81eafe (patch)
tree       0a3b7163cf5bbb783945e5deed96828cdd5b21db /Utils.cpp
parent     7b8d2e65c129263e9cdbdc82e5f73dd4d263aafb (diff)
download   android-nn-driver-1a38cdaefc5e53ba6bbaba54651dda459b81eafe.tar.gz
Rename quantized data types
!armnn:2571
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Change-Id: I06977553a097479a2a996e76a106249673d31ed7
Diffstat (limited to 'Utils.cpp')
-rw-r--r--  Utils.cpp  10
1 file changed, 5 insertions, 5 deletions
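
Note: the hunks below swap the old quantised data type names for the new ones (armnn::DataType::QuantisedAsymm8 becomes QAsymmU8, QuantisedSymm16 becomes QSymmS16). As a minimal sketch of what a caller outside this file looks like after the rename (the helper name and header paths are illustrative and not part of this patch, assuming the usual armnn::TensorInfo constructor taking shape, data type, quantisation scale and offset):

#include <cstdint>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Hypothetical helper, not part of this patch: builds a TensorInfo for an
// 8-bit asymmetric quantised tensor using the renamed enumerator.
// QuantisedAsymm8 -> QAsymmU8 (QuantisedSymm16 likewise becomes QSymmS16).
armnn::TensorInfo MakeQAsymmU8Info(const armnn::TensorShape& shape, float scale, int32_t offset)
{
    return armnn::TensorInfo(shape, armnn::DataType::QAsymmU8, scale, offset);
}
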
diff --git a/Utils.cpp b/Utils.cpp
index 0211e92a..cdebfaed 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -43,7 +43,7 @@ void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void
{
case armnn::DataType::Float16:
case armnn::DataType::Float32:
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::QuantizedSymm8PerAxis:
SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
break;
@@ -83,7 +83,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
type = armnn::DataType::Float32;
break;
case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case V1_0::OperandType::TENSOR_INT32:
type = armnn::DataType::Signed32;
@@ -119,13 +119,13 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
type = armnn::DataType::QuantizedSymm8PerAxis;
break;
case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case V1_2::OperandType::TENSOR_QUANT8_SYMM:
type = armnn::DataType::QSymmS8;
break;
case V1_2::OperandType::TENSOR_QUANT16_SYMM:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case V1_2::OperandType::TENSOR_INT32:
type = armnn::DataType::Signed32;
@@ -228,7 +228,7 @@ void DumpTensor(const std::string& dumpDir,
dumpElementFunction = &DumpTensorElement<float>;
break;
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
break;