path: root/src/armnn
author     Derek Lamberti <derek.lamberti@arm.com>  2020-01-10 17:14:08 +0000
committer  Kevin May <kevin.may@arm.com>            2020-01-13 18:18:12 +0000
commit     f90c56d72de4848a2dc5844a97458aaf09df07c2 (patch)
tree       71f1c6f16a4687286614f5526ed70938a611b27d /src/armnn
parent     842e0dbd40114e19bf26916fefe06c869dbe416d (diff)
download   armnn-f90c56d72de4848a2dc5844a97458aaf09df07c2.tar.gz
Rename quantized data types to remove ambiguity for signed/unsigned payloads
!android-nn-driver:2572
Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
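For illustration only (not part of this patch), a minimal sketch of how calling code adapts to the renamed enumerators; it assumes the usual armnn TensorInfo constructor (shape, data type, quantization scale, quantization offset) and header paths, as seen in the hunks below:

    #include <armnn/Tensor.hpp>   // TensorInfo, TensorShape
    #include <armnn/Types.hpp>    // DataType

    // Before this change (ambiguous about signedness of the 8-bit payload):
    //   armnn::TensorInfo info({1, 8}, armnn::DataType::QuantisedAsymm8, 0.1f, 128);

    // After this change (explicitly an unsigned asymmetric 8-bit payload):
    armnn::TensorInfo info({1, 8}, armnn::DataType::QAsymmU8, 0.1f, 128);

    // Likewise, QuantisedSymm16 becomes QSymmS16 (signed symmetric 16-bit):
    armnn::TensorInfo cellStateInfo({1, 4}, armnn::DataType::QSymmS16, 0.05f, 0);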
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/CompatibleTypes.hpp                 4
-rw-r--r--  src/armnn/LayerSupportCommon.hpp              2
-rw-r--r--  src/armnn/Network.cpp                         2
-rw-r--r--  src/armnn/NetworkQuantizationScheme.hpp       4
-rw-r--r--  src/armnn/NetworkQuantizer.cpp                4
-rw-r--r--  src/armnn/NetworkQuantizerUtils.cpp           2
-rw-r--r--  src/armnn/ResolveType.hpp                     4
-rw-r--r--  src/armnn/Tensor.cpp                          2
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp   32
-rw-r--r--  src/armnn/test/CreateWorkload.hpp            24
-rw-r--r--  src/armnn/test/NetworkTests.cpp               4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp            14
-rw-r--r--  src/armnn/test/QuantizerTest.cpp             94
-rw-r--r--  src/armnn/test/RuntimeTests.cpp               4
-rw-r--r--  src/armnn/test/TensorTest.cpp                 4
-rw-r--r--  src/armnn/test/UtilsTests.cpp                 2
16 files changed, 101 insertions, 101 deletions
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index cc545a9642..bca092ca0c 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -32,7 +32,7 @@ inline bool CompatibleTypes<Half>(DataType dataType)
template<>
inline bool CompatibleTypes<uint8_t>(DataType dataType)
{
- return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+ return dataType == DataType::Boolean || dataType == DataType::QAsymmU8;
}
template<>
@@ -44,7 +44,7 @@ inline bool CompatibleTypes<int8_t>(DataType dataType)
template<>
inline bool CompatibleTypes<int16_t>(DataType dataType)
{
- return dataType == DataType::QuantisedSymm16;
+ return dataType == DataType::QSymmS16;
}
template<>
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 557e72a323..e0c6b8040c 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -40,7 +40,7 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
return float16FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Float32:
return float32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return uint8FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Signed32:
return int32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 1406160914..554e2e26a5 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -101,7 +101,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
for (unsigned int i = 0; i < numOutputs; i++) {
OutputSlot& outputSlot = layer->GetOutputSlot(i);
TensorInfo info = outputSlot.GetTensorInfo();
- if (DataType::QuantisedAsymm8 == info.GetDataType()) {
+ if (DataType::QAsymmU8 == info.GetDataType()) {
if (0.f == info.GetQuantizationScale()) {
noErrors = false;
std::stringstream ss;
diff --git a/src/armnn/NetworkQuantizationScheme.hpp b/src/armnn/NetworkQuantizationScheme.hpp
index ea3c29102b..a5b7542748 100644
--- a/src/armnn/NetworkQuantizationScheme.hpp
+++ b/src/armnn/NetworkQuantizationScheme.hpp
@@ -58,7 +58,7 @@ struct QAsymm8QuantizationScheme : IQuantizationScheme
int NumBits() const override { return 8; }
- DataType GetDataType() const override { return DataType::QuantisedAsymm8; }
+ DataType GetDataType() const override { return DataType::QAsymmU8; }
};
struct QSymmS8QuantizationScheme : IQuantizationScheme
@@ -119,7 +119,7 @@ struct QSymm16QuantizationScheme : IQuantizationScheme
int NumBits() const override { return 16; }
- DataType GetDataType() const override { return DataType::QuantisedSymm16; }
+ DataType GetDataType() const override { return DataType::QSymmS16; }
};
} // namespace armnn
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index f6d625fda3..5e00de5fcb 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -160,13 +160,13 @@ INetworkPtr NetworkQuantizer::ExportNetwork()
std::unique_ptr<IQuantizationScheme> quantizationScheme;
switch (m_Options.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
quantizationScheme = std::make_unique<QAsymm8QuantizationScheme>();
break;
case DataType::QSymmS8:
quantizationScheme = std::make_unique<QSymmS8QuantizationScheme>();
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
quantizationScheme = std::make_unique<QSymm16QuantizationScheme>();
break;
default:
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index a6f9ebdc42..75473b4ae6 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -36,7 +36,7 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
}
- TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QuantisedAsymm8, scale, offset);
+ TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
return ConstTensor(qInfo, backing);
}
diff --git a/src/armnn/ResolveType.hpp b/src/armnn/ResolveType.hpp
index ba3d0fca5c..c7a244dba8 100644
--- a/src/armnn/ResolveType.hpp
+++ b/src/armnn/ResolveType.hpp
@@ -27,7 +27,7 @@ struct ResolveTypeImpl<DataType::Float32>
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedAsymm8>
+struct ResolveTypeImpl<DataType::QAsymmU8>
{
using Type = uint8_t;
};
@@ -39,7 +39,7 @@ struct ResolveTypeImpl<DataType::QSymmS8>
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedSymm16>
+struct ResolveTypeImpl<DataType::QSymmS16>
{
using Type = int16_t;
};
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index 171e02ad13..8eebc43cb5 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -289,7 +289,7 @@ void TensorInfo::SetQuantizationDim(const Optional<unsigned int>& quantizationDi
bool TensorInfo::IsQuantized() const
{
- return m_DataType == DataType::QuantisedAsymm8 || m_DataType == DataType::QuantisedSymm16;
+ return m_DataType == DataType::QAsymmU8 || m_DataType == DataType::QSymmS16;
}
// ---
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index cfcdb1d2ff..ada665e4e9 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -1248,43 +1248,43 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -1338,43 +1338,43 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index f6928f858f..02ce12a304 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -399,12 +399,12 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Weights and bias tensor and quantization info
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
@@ -463,17 +463,17 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Input/output tensor info and quantization info
armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
@@ -530,8 +530,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
@@ -637,8 +637,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
@@ -1361,7 +1361,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
if (biasEnabled)
{
- constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QuantisedAsymm8) ?
+ constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
armnn::DataType::Signed32 : armnn::DataType::Float32;
TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0);
@@ -1396,14 +1396,14 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
// set the tensors in the network (NHWC format)
TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
inputTensorInfo.SetQuantizationOffset(0);
inputTensorInfo.SetQuantizationScale(0.9f);
}
TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(0.9f);
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index d8b4e17a3c..5d1313f61f 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+ BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
}
bool m_Visited = false;
@@ -411,7 +411,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
input->GetOutputSlot(0).SetTensorInfo(infoIn);
- armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo infoOut({3,1}, armnn::DataType::QAsymmU8);
quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
Test testQuantize;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ba18aa31e8..e310d4f140 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -496,15 +496,15 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
{
Graph graph;
- armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
std::vector<uint8_t> anchorsVector(40);
- armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector);
+ armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
- armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index e147a84eb6..900aa1813e 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -82,17 +82,17 @@ protected:
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(
- info, DataType::QuantisedAsymm8, qAsymm8Params.first, qAsymm8Params.second);
+ info, DataType::QAsymmU8, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(
info, DataType::QSymmS8, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(
- info, DataType::QuantisedSymm16, qSymm16Params.first, qSymm16Params.second);
+ info, DataType::QSymmS16, qSymm16Params.first, qSymm16Params.second);
break;
default:
throw InvalidArgumentException("Unsupported quantization target");
@@ -106,27 +106,27 @@ protected:
void TestConstantQuantizationParams(const TensorInfo& info,
const OffsetScalePair& params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
boost::ignore_unused(dataType);
- TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
+ TestQuantizationParamsImpl(info, DataType::QAsymmU8, params.first, params.second);
}
void TestBiasQuantizationParams(const TensorInfo& info,
const OffsetScalePair& qAsymm8Params,
const OffsetScalePair& qSymm8Params,
const OffsetScalePair& qSymm16Params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(info, dataType, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(info, dataType, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(info, dataType, qSymm16Params.first, qSymm16Params.second);
break;
default:
@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAddition)
TestAdditionQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAdditionQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -399,7 +399,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -424,7 +424,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -449,7 +449,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -474,7 +474,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -524,7 +524,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
TestBoundedReluActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBoundedReluActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -575,7 +575,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
TestTanHActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTanHActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -636,7 +636,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
TestLeakyReLuActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLeakyReLuActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
TestBatchNormalizationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions QQsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
TestBatchNormalizationQuantization validatorQSymm16(QQsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -784,7 +784,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthToSpaceQuantization validatorQSymm16(Qsymm16Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -944,7 +944,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
TestFullyConnectedQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestFullyConnectedQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1025,7 +1025,7 @@ void TestQuantizeConvolution2d(bool useBiases)
TestConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1106,7 +1106,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
TestDepthwiseConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthwiseConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1177,7 +1177,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16Options)->ExportNetwork();
TestInstanceNormalizationQuantization validatorQSymm16(qSymm16Options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1241,7 +1241,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLogSoftmaxQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1311,7 +1311,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
TestSoftmaxQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSoftmaxQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1348,7 +1348,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStandIn)
armnn::UnimplementedException);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
BOOST_CHECK_THROW(INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(),
armnn::UnimplementedException);
}
@@ -1434,7 +1434,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
TestPermuteQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPermuteQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1484,7 +1484,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
TestSpaceToBatchQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToBatchQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1536,7 +1536,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
TestSpaceToDepthQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToDepthQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1600,7 +1600,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
TestPooling2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPooling2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1666,7 +1666,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
TestConstantQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestConstantQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1721,7 +1721,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs)
TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1804,7 +1804,7 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
TestArgMinMaxQuantization validatorQSymm8(qSymm8Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestArgMinMaxQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1868,7 +1868,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestComparisonQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1949,7 +1949,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
concatLayer->GetOutputSlot(0).SetTensorInfo(info);
const QuantizerOptions qSymm8Options(DataType::QSymmS8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get());
INetworkQuantizerPtr quantizerPtrQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options);
INetworkQuantizerPtr quantizerPtrQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options);
@@ -2026,7 +2026,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
TestReshapeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestReshapeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2075,7 +2075,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
TestSplitterQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSplitterQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2129,7 +2129,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
TestResizeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestResizeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2179,7 +2179,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
TestStridedSliceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStridedSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2229,7 +2229,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
TestBatchToSpaceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBatchToSpaceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2343,7 +2343,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
TestPreluQuantization validatorQSymm8(qSymm8Options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPreluQuantization validatorQSymm16(qSymm16options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2417,7 +2417,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTransposeConvolution2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2500,7 +2500,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
TestStackQuantization validatorQSymm8(qSymm8Options, inputShape, inputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStackQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2562,7 +2562,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2679,7 +2679,7 @@ void PreserveTypeTestImpl(const DataType& dataType)
addition->GetOutputSlot(0).SetTensorInfo(info);
QuantizerOptions options = dataType == DataType::Float32 ?
- QuantizerOptions(DataType::QuantisedAsymm8, true) : QuantizerOptions(dataType, true);
+ QuantizerOptions(DataType::QAsymmU8, true) : QuantizerOptions(dataType, true);
INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
TestPreserveType validatorQAsymm8(options, dataType, shape, shape);
@@ -2695,7 +2695,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeFloat32)
BOOST_AUTO_TEST_CASE(PreserveTypeQAsymm8)
{
- PreserveTypeTestImpl(DataType::QuantisedAsymm8);
+ PreserveTypeTestImpl(DataType::QAsymmU8);
}
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
@@ -2705,7 +2705,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16)
{
- PreserveTypeTestImpl(DataType::QuantisedSymm16);
+ PreserveTypeTestImpl(DataType::QSymmS16);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 642f334575..7263cbd784 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -261,14 +261,14 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
1.0f/255,
0
));
softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8
+ armnn::DataType::QAsymmU8
));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index 154a0bca04..3696a112cb 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -125,8 +125,8 @@ BOOST_AUTO_TEST_CASE(ModifyTensorInfo)
TensorInfo info;
info.SetShape({ 5, 6, 7, 8 });
BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- info.SetDataType(DataType::QuantisedAsymm8);
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ info.SetDataType(DataType::QAsymmU8);
+ BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
info.SetQuantizationScale(10.0f);
BOOST_TEST(info.GetQuantizationScale() == 10.0f);
info.SetQuantizationOffset(5);
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 897a35fe4c..4c371d6ed9 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -18,7 +18,7 @@ BOOST_AUTO_TEST_SUITE(Utils)
BOOST_AUTO_TEST_CASE(DataTypeSize)
{
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1);
+ BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}