diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-02-22 15:13:12 +0000 |
---|---|---|
committer | Jim Flynn <jim.flynn@arm.com> | 2021-03-03 17:06:43 +0000 |
commit | f806c4d075814a9dc9d206a4db123d3060ad7ebd (patch) | |
tree | a110c106598a6830d0862526742314f5048b8acb /tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp | |
parent | 82c59d75ef0f191887fae1cc2864bbf4b37ac0c5 (diff) | |
download | armnn-f806c4d075814a9dc9d206a4db123d3060ad7ebd.tar.gz |
IVGCVSW-5612 Fix tiny_wav2letter_relu_fixed_int8 delegate output [branch: experimental/abi-tests]
* fix delegate perchannel quantization
* change delegate to check reshape options before inputs
* Add int8 "qsymms8" option to ExecuteNetwork
* Add option to run ExecuteNetwork on tflite w/o delegate
!referencetests:301301
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: If3e12599b17aff1199d7ab0a55e1c901e480083d
Diffstat (limited to 'tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp')
-rw-r--r-- | tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp | 28 |
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp index 2afd941636..d902d23d86 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp @@ -52,6 +52,16 @@ auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream) [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); }); } + +template<> +auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream) +{ + return ParseArrayImpl<int8_t>(stream, + [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); }); +} + + + template<> auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream, const float& quantizationScale, @@ -130,6 +140,15 @@ void TensorPrinter::operator()(const std::vector<uint8_t>& values) } } +void TensorPrinter::operator()(const std::vector<int8_t>& values) +{ + ForEachValue(values, [](int8_t value) + { + printf("%d ", value); + }); + WriteToFile(values); +} + void TensorPrinter::operator()(const std::vector<int>& values) { ForEachValue(values, [](int value) @@ -170,7 +189,8 @@ void TensorPrinter::WriteToFile(const std::vector<T>& values) } } -using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; +using TContainer = + mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>; using QuantizationParams = std::pair<float, int32_t>; void PopulateTensorWithData(TContainer& tensorData, @@ -212,6 +232,12 @@ void PopulateTensorWithData(TContainer& tensorData, ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) : GenerateDummyTensorData<armnn::DataType::Signed32>(numElements); } + else if (dataTypeStr.compare("qsymms8") == 0) + { + tensorData = readFromFile ? 
+ ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) : + GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements); + } else if (dataTypeStr.compare("qasymm8") == 0) { tensorData = readFromFile ? |