diff options
author:    Derek Lamberti <derek.lamberti@arm.com>  2020-01-10 17:14:08 +0000
committer: Kevin May <kevin.may@arm.com>  2020-01-13 18:18:12 +0000
commit:    f90c56d72de4848a2dc5844a97458aaf09df07c2 (patch)
tree:      71f1c6f16a4687286614f5526ed70938a611b27d /tests
parent:    842e0dbd40114e19bf26916fefe06c869dbe416d (diff)
download:  armnn-f90c56d72de4848a2dc5844a97458aaf09df07c2.tar.gz
Rename quantized data types to remove ambiguity for signed/unsigned payloads
!android-nn-driver:2572
Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Diffstat (limited to 'tests')
5 files changed, 11 insertions, 11 deletions
diff --git a/tests/DeepSpeechV1Database.hpp b/tests/DeepSpeechV1Database.hpp index a690e3fece..81523775db 100644 --- a/tests/DeepSpeechV1Database.hpp +++ b/tests/DeepSpeechV1Database.hpp @@ -83,7 +83,7 @@ inline auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream) } template<> -inline auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream, +inline auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream, const float& quantizationScale, const int32_t& quantizationOffset) { diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp index 4e8fe78ad8..9c23200802 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp @@ -225,7 +225,7 @@ public: } else if (m_OutputType == "qasymm8") { - return armnn::DataType::QuantisedAsymm8; + return armnn::DataType::QAsymmU8; } else { @@ -272,7 +272,7 @@ int main(int argc, char* argv[]) imageDataContainers.push_back(PrepareImageTensor<int>( imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); break; - case armnn::DataType::QuantisedAsymm8: + case armnn::DataType::QAsymmU8: imageDataContainers.push_back(PrepareImageTensor<uint8_t>( imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); break; diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp index b1cb5e36f5..4793f822fb 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp @@ -60,7 +60,7 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode case armnn::DataType::Signed32: normParams.mean = { 128.0, 128.0, 128.0 }; break; - case armnn::DataType::QuantisedAsymm8: + case armnn::DataType::QAsymmU8: default: break; } diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp 
b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp index 0d7d7689e3..ecfc21209c 100644 --- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp +++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp @@ -322,7 +322,7 @@ int main(int argc, char* argv[]) inputTensorDataLayout)); outputDataContainers = { vector<int>(outputNumElements) }; break; - case armnn::DataType::QuantisedAsymm8: + case armnn::DataType::QAsymmU8: inputDataContainers.push_back( PrepareImageTensor<uint8_t>(imagePath.string(), inputTensorWidth, inputTensorHeight, diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp index f9e9b146d4..ff460dd85e 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp @@ -145,14 +145,14 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream) } template<> -auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream) +auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream) { return ParseArrayImpl<uint8_t>(stream, [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); }); } template<> -auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream, +auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream, const float& quantizationScale, const int32_t& quantizationOffset) { @@ -309,8 +309,8 @@ void PopulateTensorWithData(TContainer& tensorData, const int qOffset = qParams.value().second; tensorData = readFromFile ? 
- ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile, qScale, qOffset) : - GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements); + ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) : + GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements); } else { @@ -328,8 +328,8 @@ void PopulateTensorWithData(TContainer& tensorData, else if (dataTypeStr.compare("qasymm8") == 0) { tensorData = readFromFile ? - ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile) : - GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements); + ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) : + GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements); } else {