From f90c56d72de4848a2dc5844a97458aaf09df07c2 Mon Sep 17 00:00:00 2001 From: Derek Lamberti Date: Fri, 10 Jan 2020 17:14:08 +0000 Subject: Rename quantized data types to remove ambiguity for signed/unsigned payloads !android-nn-driver:2572 Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc Signed-off-by: Derek Lamberti --- src/armnn/test/ConstTensorLayerVisitor.cpp | 32 +++++----- src/armnn/test/CreateWorkload.hpp | 24 ++++---- src/armnn/test/NetworkTests.cpp | 4 +- src/armnn/test/OptimizerTests.cpp | 14 ++--- src/armnn/test/QuantizerTest.cpp | 94 +++++++++++++++--------------- src/armnn/test/RuntimeTests.cpp | 4 +- src/armnn/test/TensorTest.cpp | 4 +- src/armnn/test/UtilsTests.cpp | 2 +- 8 files changed, 89 insertions(+), 89 deletions(-) (limited to 'src/armnn/test') diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp index cfcdb1d2ff..ada665e4e9 100644 --- a/src/armnn/test/ConstTensorLayerVisitor.cpp +++ b/src/armnn/test/ConstTensorLayerVisitor.cpp @@ -1248,43 +1248,43 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer) std::vector inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToInputWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToInputWeights( - TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData); + TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData); std::vector inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToForgetWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToForgetWeights( - TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData); + TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData); std::vector inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToCellWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToCellWeights( - TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData); + TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData); std::vector inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToOutputWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToOutputWeights( - TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData); + TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData); std::vector recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToInputWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToInputWeights(TensorInfo( - 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData); + 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData); std::vector recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToForgetWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToForgetWeights(TensorInfo( - 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData); + 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData); std::vector recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToCellWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToCellWeights(TensorInfo( - 4, 
recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData); + 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData); std::vector recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToOutputWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToOutputWeights(TensorInfo( - 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData); + 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData); std::vector inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; @@ -1338,43 +1338,43 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer) std::vector inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToInputWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToInputWeights( - TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData); + TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData); std::vector inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToForgetWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToForgetWeights( - TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData); + TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData); std::vector inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToCellWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToCellWeights( - TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData); + TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData); std::vector inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector inputToOutputWeightsDimensions = {1, 1, 3, 3}; ConstTensor inputToOutputWeights( - TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData); + TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData); std::vector recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToInputWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToInputWeights(TensorInfo( - 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData); + 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData); std::vector recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToForgetWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToForgetWeights(TensorInfo( - 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData); + 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData); std::vector recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToCellWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToCellWeights(TensorInfo( - 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData); + 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData); std::vector recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; std::vector recurrentToOutputWeightsDimensions = {1, 1, 3, 3}; ConstTensor recurrentToOutputWeights(TensorInfo( - 4, 
recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData); + 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData); std::vector inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9}; diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index f6928f858f..02ce12a304 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -399,12 +399,12 @@ std::unique_ptr CreateQuantizedLstmWorkloadTest(armnn::IW // Weights and bias tensor and quantization info armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, - armnn::DataType::QuantisedAsymm8, + armnn::DataType::QAsymmU8, weightsScale, weightsOffset); armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, - armnn::DataType::QuantisedAsymm8, + armnn::DataType::QAsymmU8, weightsScale, weightsOffset); @@ -463,17 +463,17 @@ std::unique_ptr CreateQuantizedLstmWorkloadTest(armnn::IW // Input/output tensor info and quantization info armnn::TensorInfo inputInfo({numBatches , inputSize}, - armnn::DataType::QuantisedAsymm8, + armnn::DataType::QAsymmU8, inputOutputScale, inputOutputOffset); armnn::TensorInfo cellStateInfo({numBatches , outputSize}, - armnn::DataType::QuantisedSymm16, + armnn::DataType::QSymmS16, cellStateScale, cellStateOffset); armnn::TensorInfo outputStateInfo({numBatches , outputSize}, - armnn::DataType::QuantisedAsymm8, + armnn::DataType::QAsymmU8, inputOutputScale, inputOutputOffset); @@ -530,8 +530,8 @@ std::unique_ptr CreateDirectConvolution2dWorkloadTest(arm Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0; + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; layer->m_Weight = std::make_unique(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale)); layer->m_Bias = std::make_unique @@ -637,8 +637,8 @@ std::unique_ptr CreateFullyConnectedWorkloadTest(armnn:: FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); - float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0; + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; layer->m_Weight = std::make_unique(TensorInfo({7, 20}, DataType, inputsQScale, 0)); layer->m_Bias = std::make_unique(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)); @@ -1361,7 +1361,7 @@ std::pair> Cre if (biasEnabled) { - constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QuantisedAsymm8) ? + constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ? 
armnn::DataType::Signed32 : armnn::DataType::Float32; TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0); @@ -1396,14 +1396,14 @@ std::pair> Cre // set the tensors in the network (NHWC format) TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType); - if (dataType == armnn::DataType::QuantisedAsymm8) + if (dataType == armnn::DataType::QAsymmU8) { inputTensorInfo.SetQuantizationOffset(0); inputTensorInfo.SetQuantizationScale(0.9f); } TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType); - if (dataType == armnn::DataType::QuantisedAsymm8) + if (dataType == armnn::DataType::QAsymmU8) { outputTensorInfo.SetQuantizationOffset(0); outputTensorInfo.SetQuantizationScale(0.9f); diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index d8b4e17a3c..5d1313f61f 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize) BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32)); const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo(); - BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8)); + BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8)); } bool m_Visited = false; @@ -411,7 +411,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize) armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32); input->GetOutputSlot(0).SetTensorInfo(infoIn); - armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8); + armnn::TensorInfo infoOut({3,1}, armnn::DataType::QAsymmU8); quantize->GetOutputSlot(0).SetTensorInfo(infoOut); Test testQuantize; diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index ba18aa31e8..e310d4f140 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -496,15 +496,15 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices) BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes) { Graph graph; - armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8); - armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8); + armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8); + armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8); std::vector anchorsVector(40); - armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector); + armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector); - armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8); - armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8); - armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8); - armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8); + armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8); + armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8); + armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8); + armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8); Layer* input0 = graph.AddLayer(0, "boxEncodings"); input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo); diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index e147a84eb6..900aa1813e 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -82,17 +82,17 @@ protected: { switch (m_QuantizerOptions.m_ActivationFormat) { - case DataType::QuantisedAsymm8: + 
case DataType::QAsymmU8: TestQuantizationParamsImpl( - info, DataType::QuantisedAsymm8, qAsymm8Params.first, qAsymm8Params.second); + info, DataType::QAsymmU8, qAsymm8Params.first, qAsymm8Params.second); break; case DataType::QSymmS8: TestQuantizationParamsImpl( info, DataType::QSymmS8, qSymm8Params.first, qSymm8Params.second); break; - case DataType::QuantisedSymm16: + case DataType::QSymmS16: TestQuantizationParamsImpl( - info, DataType::QuantisedSymm16, qSymm16Params.first, qSymm16Params.second); + info, DataType::QSymmS16, qSymm16Params.first, qSymm16Params.second); break; default: throw InvalidArgumentException("Unsupported quantization target"); @@ -106,27 +106,27 @@ protected: void TestConstantQuantizationParams(const TensorInfo& info, const OffsetScalePair& params, - DataType dataType = DataType::QuantisedAsymm8) + DataType dataType = DataType::QAsymmU8) { boost::ignore_unused(dataType); - TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second); + TestQuantizationParamsImpl(info, DataType::QAsymmU8, params.first, params.second); } void TestBiasQuantizationParams(const TensorInfo& info, const OffsetScalePair& qAsymm8Params, const OffsetScalePair& qSymm8Params, const OffsetScalePair& qSymm16Params, - DataType dataType = DataType::QuantisedAsymm8) + DataType dataType = DataType::QAsymmU8) { switch (m_QuantizerOptions.m_ActivationFormat) { - case DataType::QuantisedAsymm8: + case DataType::QAsymmU8: TestQuantizationParamsImpl(info, dataType, qAsymm8Params.first, qAsymm8Params.second); break; case DataType::QSymmS8: TestQuantizationParamsImpl(info, dataType, qSymm8Params.first, qSymm8Params.second); break; - case DataType::QuantisedSymm16: + case DataType::QSymmS16: TestQuantizationParamsImpl(info, dataType, qSymm16Params.first, qSymm16Params.second); break; default: @@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAddition) TestAdditionQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestAdditionQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -399,7 +399,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation) TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -424,7 +424,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLinearActivation) TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestActivationQuantization validatorQSymm16(qSymm16options, shape, 
shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -449,7 +449,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReLuActivation) TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -474,7 +474,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation) TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -524,7 +524,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation) TestBoundedReluActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestBoundedReluActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -575,7 +575,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation) TestTanHActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestTanHActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -636,7 +636,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation) TestLeakyReLuActivationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestLeakyReLuActivationQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm) TestBatchNormalizationQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions QQsymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions QQsymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = 
INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork(); TestBatchNormalizationQuantization validatorQSymm16(QQsymm16Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -784,7 +784,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QSymm16 quantization - const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions Qsymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork(); TestDepthToSpaceQuantization validatorQSymm16(Qsymm16Options, inputShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -944,7 +944,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled) TestFullyConnectedQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions Qsymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork(); TestFullyConnectedQuantization validatorQSymm16(Qsymm16Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1025,7 +1025,7 @@ void TestQuantizeConvolution2d(bool useBiases) TestConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions Qsymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork(); TestConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1106,7 +1106,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases) TestDepthwiseConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions Qsymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork(); TestDepthwiseConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1177,7 +1177,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QSymm16 quantization - const QuantizerOptions qSymm16Options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16Options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16Options)->ExportNetwork(); TestInstanceNormalizationQuantization validatorQSymm16(qSymm16Options, tensorShape, tensorShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1241,7 +1241,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QuantisedSymm16 quantization - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); 
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestLogSoftmaxQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1311,7 +1311,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax) TestSoftmaxQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestSoftmaxQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1348,7 +1348,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStandIn) armnn::UnimplementedException); // test QuantisedSymm16 quantization - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); BOOST_CHECK_THROW(INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(), armnn::UnimplementedException); } @@ -1434,7 +1434,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute) TestPermuteQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestPermuteQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1484,7 +1484,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch) TestSpaceToBatchQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestSpaceToBatchQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1536,7 +1536,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth) TestSpaceToDepthQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestSpaceToDepthQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1600,7 +1600,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d) TestPooling2dQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestPooling2dQuantization 
validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1666,7 +1666,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant) TestConstantQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestConstantQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1721,7 +1721,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs) TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1804,7 +1804,7 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax) TestArgMinMaxQuantization validatorQSymm8(qSymm8Options, inputShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestArgMinMaxQuantization validatorQSymm16(qSymm16options, inputShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1868,7 +1868,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QuantisedSymm16 quantization - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestComparisonQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -1949,7 +1949,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat) concatLayer->GetOutputSlot(0).SetTensorInfo(info); const QuantizerOptions qSymm8Options(DataType::QSymmS8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get()); INetworkQuantizerPtr quantizerPtrQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options); INetworkQuantizerPtr quantizerPtrQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options); @@ -2026,7 +2026,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape) TestReshapeQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestReshapeQuantization 
validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2075,7 +2075,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter) TestSplitterQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestSplitterQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2129,7 +2129,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize) TestResizeQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestResizeQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2179,7 +2179,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice) TestStridedSliceQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestStridedSliceQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2229,7 +2229,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace) TestBatchToSpaceQuantization validatorQSymm8(qSymm8Options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestBatchToSpaceQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2343,7 +2343,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu) TestPreluQuantization validatorQSymm8(qSymm8Options, inputShape, alphaShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestPreluQuantization validatorQSymm16(qSymm16options, inputShape, alphaShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2417,7 +2417,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QSymm16 quantization - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), 
qSymm16options)->ExportNetwork(); TestTransposeConvolution2dQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2500,7 +2500,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStack) TestStackQuantization validatorQSymm8(qSymm8Options, inputShape, inputShape); VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestStackQuantization validatorQSymm16(qSymm16options, inputShape, outputShape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2562,7 +2562,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice) VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); // test QSymm16 quantization - const QuantizerOptions qSymm16options(DataType::QuantisedSymm16); + const QuantizerOptions qSymm16options(DataType::QSymmS16); INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); TestSliceQuantization validatorQSymm16(qSymm16options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); @@ -2679,7 +2679,7 @@ void PreserveTypeTestImpl(const DataType& dataType) addition->GetOutputSlot(0).SetTensorInfo(info); QuantizerOptions options = dataType == DataType::Float32 ? - QuantizerOptions(DataType::QuantisedAsymm8, true) : QuantizerOptions(dataType, true); + QuantizerOptions(DataType::QAsymmU8, true) : QuantizerOptions(dataType, true); INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork(); TestPreserveType validatorQAsymm8(options, dataType, shape, shape); @@ -2695,7 +2695,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeFloat32) BOOST_AUTO_TEST_CASE(PreserveTypeQAsymm8) { - PreserveTypeTestImpl(DataType::QuantisedAsymm8); + PreserveTypeTestImpl(DataType::QAsymmU8); } BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8) @@ -2705,7 +2705,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8) BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16) { - PreserveTypeTestImpl(DataType::QuantisedSymm16); + PreserveTypeTestImpl(DataType::QSymmS16); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp index 642f334575..7263cbd784 100644 --- a/src/armnn/test/RuntimeTests.cpp +++ b/src/armnn/test/RuntimeTests.cpp @@ -261,14 +261,14 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue) input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo( armnn::TensorShape({ 1, 5 }), - armnn::DataType::QuantisedAsymm8, + armnn::DataType::QAsymmU8, 1.0f/255, 0 )); softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo( armnn::TensorShape({ 1, 5 }), - armnn::DataType::QuantisedAsymm8 + armnn::DataType::QAsymmU8 )); std::vector backends = {armnn::Compute::CpuRef}; diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp index 154a0bca04..3696a112cb 100644 --- a/src/armnn/test/TensorTest.cpp +++ b/src/armnn/test/TensorTest.cpp @@ -125,8 +125,8 @@ BOOST_AUTO_TEST_CASE(ModifyTensorInfo) TensorInfo info; info.SetShape({ 5, 6, 7, 8 }); BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 }))); - info.SetDataType(DataType::QuantisedAsymm8); - BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8)); + info.SetDataType(DataType::QAsymmU8); + BOOST_TEST((info.GetDataType() == 
DataType::QAsymmU8)); info.SetQuantizationScale(10.0f); BOOST_TEST(info.GetQuantizationScale() == 10.0f); info.SetQuantizationOffset(5); diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp index 897a35fe4c..4c371d6ed9 100644 --- a/src/armnn/test/UtilsTests.cpp +++ b/src/armnn/test/UtilsTests.cpp @@ -18,7 +18,7 @@ BOOST_AUTO_TEST_SUITE(Utils) BOOST_AUTO_TEST_CASE(DataTypeSize) { BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4); - BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1); + BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1); BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4); BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1); } -- cgit v1.2.1
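Note (not part of the patch): the rename maps armnn::DataType::QuantisedAsymm8 to DataType::QAsymmU8 and armnn::DataType::QuantisedSymm16 to DataType::QSymmS16, making the signedness of the payload explicit; DataType::QSymmS8 already carried the new naming. The sketch below only illustrates the new enumerator names, using the armnn::TensorInfo constructor already exercised in the hunks above (for example in RuntimeTests.cpp); the helper function name is illustrative, not from the patch.

    // Minimal usage sketch of the renamed enumerators (assumes only the
    // TensorInfo(TensorShape, DataType, scale, offset) constructor seen in this patch).
    //   DataType::QuantisedAsymm8 -> DataType::QAsymmU8  (unsigned, asymmetric, 8-bit)
    //   DataType::QuantisedSymm16 -> DataType::QSymmS16  (signed, symmetric, 16-bit)
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::TensorInfo MakeQuantizedInfo()
    {
        // Previously written as armnn::DataType::QuantisedAsymm8.
        return armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
                                 armnn::DataType::QAsymmU8,
                                 1.0f / 255, // quantization scale
                                 0);         // quantization offset
    }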