author     Mike Kelly <mike.kelly@arm.com>              2021-07-21 09:42:43 +0100
committer  mike.kelly <mike.kelly@arm.com>              2021-07-26 08:56:53 +0000
commit     d7ed6d4e53a877a25fcea754d76c8831451f18f1 (patch)
tree       74edb3b7cdc991232bb8f8577ae2fd89dfc95b0a
parent     4adf0de1f2380c215b7d6f643afe04ef4366df1e (diff)
download   armnn-experimental/CustomAllocator3.tar.gz

GitHub #557 wrong result in int8 model (branch: experimental/CustomAllocator3)
* Added support for qasymms8 (int8) to ImageTensorGenerator
* Added qasymmu8 as alias for qasymm8 in ImageTensorGenerator
* Added support for qasymms8 (int8) to ExecuteNetwork
* Added qasymmu8 as alias for qasymm8 in ExecuteNetwork
* Set tflite to be the default model format in ImageTensorGenerator as it's the only supported model format.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
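For context, hypothetical invocations exercising the new type names might look like the lines below. They are a sketch only: the file paths and the bracketed placeholders are illustrative and not part of this patch, and only options that actually appear in this change are used (-f/-o/-z for ImageTensorGenerator; -y/--input-type and -z/--output-type for ExecuteNetwork).

    ImageTensorGenerator -f tflite -z qasymms8 -o /tmp/input_qasymms8.raw <image options>
    ExecuteNetwork -y qasymms8 -z qasymms8 <model and tensor options>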
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                 36
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp    6
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.cpp     18
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.hpp     34
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp   30
5 files changed, 109 insertions, 15 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bce83583cc..a9b5a3c3f4 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -155,7 +155,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
std::copy(tensorData.begin(), tensorData.end(), inputData);
}
- else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+ else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
+ params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
@@ -175,6 +176,26 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
std::copy(tensorData.begin(), tensorData.end(), inputData);
}
+ else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
+ {
+ auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+ if(inputData == NULL)
+ {
+ ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+ "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+ return EXIT_FAILURE;
+ }
+
+ std::vector<int8_t> tensorData;
+ PopulateTensorWithDataGeneric<int8_t>(tensorData,
+ params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+ dataFile,
+ [](const std::string& s)
+ { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+
+ std::copy(tensorData.begin(), tensorData.end(), inputData);
+ }
else
{
ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
@@ -245,7 +266,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
printf("%d ", tfLiteDelageOutputData[i]);
}
}
- else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
+ else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 ||
+ params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0)
{
auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
if(tfLiteDelageOutputData == NULL)
@@ -374,13 +396,17 @@ int MainImpl(const ExecuteNetworkParams& params,
if (params.m_OutputTypes[i].compare("float") == 0)
{
outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
- } else if (params.m_OutputTypes[i].compare("int") == 0)
+ }
+ else if (params.m_OutputTypes[i].compare("int") == 0)
{
outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
- } else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
+ }
+ else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
+ params.m_OutputTypes[i].compare("qasymmu8") == 0)
{
outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
- } else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
+ }
+ else if (params.m_OutputTypes[i].compare("qasymms8") == 0)
{
outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
} else
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 25ddecf3ba..b12547f51c 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -232,7 +232,7 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
("q,quantize-input",
- "If this option is enabled, all float inputs will be quantized to qasymm8. "
+ "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
"If unset, default to not quantized. Accepted values (true or false)",
cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
@@ -264,13 +264,13 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("y,input-type",
"The type of the input tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, int or qasymm8).",
+ "Accepted values (float, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("z,output-type",
"The type of the output tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined outputs. "
- "Accepted values (float, int or qasymm8).",
+ "Accepted values (float, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("T,tflite-executor",
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index a2110f9cf3..b4432558c4 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -164,15 +164,16 @@ public:
("f,model-format",
"Format of the intended model file that uses the images."
"Different formats have different image normalization styles."
+ "If unset, defaults to tflite."
"Accepted value (tflite)",
- cxxopts::value<std::string>(m_ModelFormat))
+ cxxopts::value<std::string>(m_ModelFormat)->default_value("tflite"))
("o,outfile",
"Output raw tensor file path",
cxxopts::value<std::string>(m_OutputFileName))
("z,output-type",
"The data type of the output tensors."
"If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, int or qasymm8)",
+ "Accepted values (float, int, qasymms8 or qasymmu8)",
cxxopts::value<std::string>(m_OutputType)->default_value("float"))
("new-width",
"Resize image to new width. Keep original width if unspecified",
@@ -254,10 +255,14 @@ public:
{
return armnn::DataType::Signed32;
}
- else if (m_OutputType == "qasymm8")
+ else if (m_OutputType == "qasymm8" || m_OutputType == "qasymmu8")
{
return armnn::DataType::QAsymmU8;
}
+ else if (m_OutputType == "qasymms8")
+ {
+ return armnn::DataType::QAsymmS8;
+ }
else
{
throw armnn::Exception("Unsupported input type" + m_OutputType);
@@ -292,7 +297,8 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
const armnn::DataLayout outputLayout(cmdline.GetLayout());
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>,
+ std::vector<int8_t>>;
std::vector<TContainer> imageDataContainers;
const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
@@ -307,6 +313,10 @@ int main(int argc, char* argv[])
imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
break;
+ case armnn::DataType::QAsymmS8:
+ imageDataContainers.push_back(PrepareImageTensor<int8_t>(
+ imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
+ break;
case armnn::DataType::Float32:
default:
imageDataContainers.push_back(PrepareImageTensor<float>(
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index 5aa2ca8124..6d2e549360 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -56,6 +56,10 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode
normParams.mean = { 128.0, 128.0, 128.0 };
break;
case armnn::DataType::QAsymmU8:
+ break;
+ case armnn::DataType::QAsymmS8:
+ normParams.mean = { 128.0, 128.0, 128.0 };
+ break;
default:
break;
}
@@ -138,7 +142,7 @@ std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
return imageDataInt;
}
-// Prepare qasymm8 image tensor
+// Prepare qasymmu8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
unsigned int newWidth,
@@ -158,6 +162,26 @@ std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
return imageDataQasymm8;
}
+// Prepare qasymms8 image tensor
+template <>
+std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
+ unsigned int newWidth,
+ unsigned int newHeight,
+ const NormalizationParameters& normParams,
+ unsigned int batchSize,
+ const armnn::DataLayout& outputLayout)
+{
+ // Get float32 image tensor
+ std::vector<float> imageDataFloat =
+ PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
+ std::vector<int8_t> imageDataQasymms8;
+ imageDataQasymms8.reserve(imageDataFloat.size());
+ // Convert to int8 image tensor with static cast
+ std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
+ [](float val) { return static_cast<int8_t>(val); });
+ return imageDataQasymms8;
+}
+
/** Write image tensor to ofstream
*
* @param[in] imageData Image tensor data
@@ -176,3 +200,11 @@ void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::o
{
std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
+
+// For int8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of
+// numerical values
+template <>
+void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
+{
+ std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
+} \ No newline at end of file
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 23b892ffb4..0906c1cf3f 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -40,6 +40,13 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
}
template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+template<>
auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
{
return ParseArrayImpl<uint8_t>(stream,
@@ -54,7 +61,20 @@ auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
[](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
}
-
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [&quantizationScale, &quantizationOffset](const std::string& s)
+ {
+ return armnn::numeric_cast<int8_t>(
+ armnn::Quantize<int8_t>(std::stof(s),
+ quantizationScale,
+ quantizationOffset));
+ });
+}
template<>
auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
@@ -232,12 +252,18 @@ void PopulateTensorWithData(TContainer& tensorData,
ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
}
- else if (dataTypeStr.compare("qasymm8") == 0)
+ else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
{
tensorData = readFromFile ?
ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
}
+ else if (dataTypeStr.compare("qasymms8") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
+ }
else
{
std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;