diff options
-rw-r--r--  ConversionUtils.hpp |  2 +-
-rw-r--r--  Utils.cpp           | 10 +++++-----
2 files changed, 6 insertions, 6 deletions
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index b53432ca..a0ab9e5a 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -2961,7 +2961,7 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as // (QuantizationOffset - QuantizationOffset) * scale = 0. - if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8) + if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8) { descriptor.m_PadValue = inputInfo.GetQuantizationOffset(); } @@ -43,7 +43,7 @@ void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void { case armnn::DataType::Float16: case armnn::DataType::Float32: - case armnn::DataType::QuantisedAsymm8: + case armnn::DataType::QAsymmU8: case armnn::DataType::QuantizedSymm8PerAxis: SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings); break; @@ -83,7 +83,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand) type = armnn::DataType::Float32; break; case V1_0::OperandType::TENSOR_QUANT8_ASYMM: - type = armnn::DataType::QuantisedAsymm8; + type = armnn::DataType::QAsymmU8; break; case V1_0::OperandType::TENSOR_INT32: type = armnn::DataType::Signed32; @@ -119,13 +119,13 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand) type = armnn::DataType::QuantizedSymm8PerAxis; break; case V1_2::OperandType::TENSOR_QUANT8_ASYMM: - type = armnn::DataType::QuantisedAsymm8; + type = armnn::DataType::QAsymmU8; break; case V1_2::OperandType::TENSOR_QUANT8_SYMM: type = armnn::DataType::QSymmS8; break; case V1_2::OperandType::TENSOR_QUANT16_SYMM: - type = armnn::DataType::QuantisedSymm16; + type = armnn::DataType::QSymmS16; break; case V1_2::OperandType::TENSOR_INT32: type = 
armnn::DataType::Signed32; @@ -228,7 +228,7 @@ void DumpTensor(const std::string& dumpDir, dumpElementFunction = &DumpTensorElement<float>; break; } - case armnn::DataType::QuantisedAsymm8: + case armnn::DataType::QAsymmU8: { dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>; break;