author     Finn Williams <Finn.Williams@arm.com>    2021-02-22 15:13:12 +0000
committer  Jim Flynn <jim.flynn@arm.com>            2021-03-03 17:06:43 +0000
commit     f806c4d075814a9dc9d206a4db123d3060ad7ebd (patch)
tree       a110c106598a6830d0862526742314f5048b8acb
parent     82c59d75ef0f191887fae1cc2864bbf4b37ac0c5 (diff)
IVGCVSW-5612 Fix tiny_wav2letter_relu_fixed_int8 delegate output (experimental/abi-tests)
* Fix delegate per-channel quantization
* Change delegate to check reshape options before inputs
* Add int8 "qsymms8" option to ExecuteNetwork
* Add option to run ExecuteNetwork on tflite without the delegate

!referencetests:301301

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: If3e12599b17aff1199d7ab0a55e1c901e480083d
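Several hunks below widen the shared TContainer alias so that int8 ("qsymms8") tensors can travel through ExecuteNetwork and the test utilities, which in turn means every visitor over TContainer gains an int8_t case. A minimal standalone sketch of the widened variant and such a visitor (assuming the mapbox variant header Arm NN already uses is reachable as <mapbox/variant.hpp>):

    #include <cstdint>
    #include <cstdio>
    #include <iostream>
    #include <vector>
    #include <mapbox/variant.hpp>

    // Mirrors the widened alias added throughout this change.
    using TContainer =
        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;

    struct PrintVisitor
    {
        // New alternative: signed 8-bit ("qsymms8") data.
        void operator()(const std::vector<int8_t>& values) const
        {
            for (int8_t value : values) { printf("%d ", value); }
        }
        // Remaining alternatives: float, int, unsigned char.
        template <typename T>
        void operator()(const std::vector<T>& values) const
        {
            for (const T& value : values) { std::cout << +value << " "; }
        }
    };

    int main()
    {
        TContainer output = std::vector<int8_t>{ -3, 0, 5 };
        mapbox::util::apply_visitor(PrintVisitor{}, output);   // prints "-3 0 5 "
        return 0;
    }

The printf-style printing matches the formatting this change adopts in TensorPrinter and in the delegate output path.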
-rw-r--r--  delegate/src/DelegateUtils.hpp                          |  2
-rw-r--r--  delegate/src/Redefine.hpp                               | 43
-rw-r--r--  src/armnn/NetworkQuantizer.cpp                          |  3
-rw-r--r--  src/armnn/test/ModelAccuracyCheckerTest.cpp             |  3
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                 | 87
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp           |  8
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp   | 48
-rw-r--r--  tests/InferenceModel.hpp                                |  3
-rw-r--r--  tests/InferenceTest.hpp                                 |  3
-rw-r--r--  tests/InferenceTest.inl                                 | 11
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp   | 28
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp   |  5
12 files changed, 163 insertions(+), 81 deletions(-)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 17261e4d40..1b5f1e369e 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -446,7 +446,7 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
if (affineQuantization->scale->size > 1)
{
std::vector<float> quantizationScales;
- for (unsigned int i = 1; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
+ for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
{
quantizationScales.push_back(affineQuantization->scale->data[i]);
}
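The hunk above is the per-channel quantization fix: the loop previously started at index 1 and silently dropped the first channel's scale. A minimal standalone sketch of the corrected gathering step (FloatArray is a stand-in for TfLite's TfLiteFloatArray):

    #include <vector>

    struct FloatArray            // stand-in for TfLiteFloatArray: element count plus data pointer
    {
        int    size;
        float* data;
    };

    std::vector<float> GatherPerChannelScales(const FloatArray& scales)
    {
        std::vector<float> quantizationScales;
        quantizationScales.reserve(static_cast<size_t>(scales.size));
        for (int i = 0; i < scales.size; ++i)   // start at 0; starting at 1 skipped channel 0
        {
            quantizationScales.push_back(scales.data[i]);
        }
        return quantizationScales;              // one scale per output channel
    }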
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index 5e130b27f2..3df26cacc3 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -83,10 +83,19 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
armnn::ReshapeDescriptor reshapeDesc;
std::vector<int32_t> targetShape;
- bool shapeSet = false;
+
+ TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
// The new shape can be defined by either a second input tensor or by a builtin option, we need to check for both.
- if (numInputs == 2)
+ // Options might be set without valid data. we need to check the dimensions are in a valid range.
+ if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
+ {
+ for (int i=0; i < reshapeOptions->num_dimensions; ++i)
+ {
+ targetShape.push_back(reshapeOptions->shape[i]);
+ }
+ }
+ else if (numInputs == 2)
{
// Get shape from the second input tensor
const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
@@ -111,33 +120,15 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
{
targetShape.push_back(*(shapeTensorDataPtr+i));
}
- shapeSet = true;
}
}
- if (!shapeSet)
+ else
{
- // Get shape from the builtin data
- TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
-
- if (reshapeOptions != nullptr)
- {
- // Options might be set without valid data. we need to check the dimensions are in a valid range.
- if (reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
- {
- for (int i=0; i < reshapeOptions->num_dimensions; ++i)
- {
- targetShape.push_back(reshapeOptions->shape[i]);
- }
- }
- }
- else
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "Target shape not defined in reshape parameters or input tensor. "
- "At least one method required in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "Target shape not defined in reshape parameters or input tensor. "
+ "At least one method required in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
}
// Use the data to create the required tensor shape.
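After this change the delegate resolves the reshape target shape from the builtin options first and only falls back to the second input tensor. A simplified sketch of that decision order (hypothetical helper; the real code above also validates and reads the shape tensor):

    enum class ShapeSource { BuiltinOptions, ShapeInputTensor, Missing };

    // numOptionDims comes from TfLiteReshapeParams::num_dimensions,
    // numInputs from the node's input count.
    ShapeSource ResolveReshapeShapeSource(bool hasOptions, int numOptionDims, int numInputs)
    {
        if (hasOptions && numOptionDims > 0 && numOptionDims <= 8)
        {
            return ShapeSource::BuiltinOptions;     // options are now checked before the inputs
        }
        if (numInputs == 2)
        {
            return ShapeSource::ShapeInputTensor;   // fall back to the second input tensor
        }
        return ShapeSource::Missing;                // caller logs and returns kTfLiteError
    }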
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index 06d8c5d0f2..fd4486bc31 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -31,7 +31,8 @@
namespace armnn
{
-using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
INetworkQuantizer* INetworkQuantizer::CreateRaw(INetwork* inputNetwork, const QuantizerOptions& options)
{
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 55ac19a406..93dba7e75e 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -52,7 +52,8 @@ struct TestHelper
BOOST_AUTO_TEST_SUITE(ModelAccuracyCheckerTest)
-using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
{
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index e3ca22e0ff..f812e53e04 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -54,18 +54,27 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
builder(&tfLiteInterpreter);
tfLiteInterpreter->AllocateTensors();
- // Create the Armnn Delegate
- armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- // Register armnn_delegate to TfLiteInterpreter
- int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
- if (status == kTfLiteError)
+ int status = 0;
+ if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
{
- ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
- return EXIT_FAILURE;
+ // Create the Armnn Delegate
+ armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ // Register armnn_delegate to TfLiteInterpreter
+ status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+ if (status == kTfLiteError)
+ {
+ ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
+ return EXIT_FAILURE;
+ }
}
+ else
+ {
+ std::cout << "Running on TfLite without ArmNN delegate\n";
+ }
+
std::vector<std::string> inputBindings;
for (const std::string& inputName: params.m_InputNames)
@@ -110,7 +119,7 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
std::copy(tensorData.begin(), tensorData.end(), inputData);
}
- else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
+ else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
@@ -180,7 +189,7 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
for (size_t x = 0; x < params.m_Iterations; x++)
{
// Run the inference
- tfLiteInterpreter->Invoke();
+ status = tfLiteInterpreter->Invoke();
// Print out the output
for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
@@ -207,11 +216,7 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
for (int i = 0; i < outputSize; ++i)
{
- std::cout << tfLiteDelageOutputData[i] << ", ";
- if (i % 60 == 0)
- {
- std::cout << std::endl;
- }
+ printf("%f ", tfLiteDelageOutputData[i]);
}
}
else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
@@ -226,14 +231,10 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
for (int i = 0; i < outputSize; ++i)
{
- std::cout << tfLiteDelageOutputData[i] << ", ";
- if (i % 60 == 0)
- {
- std::cout << std::endl;
- }
+ printf("%d ", tfLiteDelageOutputData[i]);
}
}
- else if (params.m_OutputTypes[outputIndex].compare("int8") == 0)
+ else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
{
auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
if(tfLiteDelageOutputData == NULL)
@@ -245,11 +246,7 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
for (int i = 0; i < outputSize; ++i)
{
- std::cout << signed(tfLiteDelageOutputData[i]) << ", ";
- if (i % 60 == 0)
- {
- std::cout << std::endl;
- }
+ printf("%d ", tfLiteDelageOutputData[i]);
}
}
else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
@@ -264,11 +261,7 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
for (int i = 0; i < outputSize; ++i)
{
- std::cout << unsigned(tfLiteDelageOutputData[i]) << ", ";
- if (i % 60 == 0)
- {
- std::cout << std::endl;
- }
+ printf("%u ", tfLiteDelageOutputData[i]);
}
}
else
@@ -289,7 +282,8 @@ template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
std::vector<TContainer> inputDataContainers;
@@ -383,6 +377,10 @@ int MainImpl(const ExecuteNetworkParams& params,
{
outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
}
+ else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
+ {
+ outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
+ }
else
{
ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
@@ -503,8 +501,19 @@ int main(int argc, const char* argv[])
}
else if(modelFormat.find("tflite") != std::string::npos)
{
-
- if (ProgramOptions.m_ExNetParams.m_EnableDelegate)
+ if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
+ {
+ #if defined(ARMNN_TF_LITE_PARSER)
+ return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
+ #else
+ ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+ return EXIT_FAILURE;
+ #endif
+ }
+ else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
+ ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
+ ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
+ ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
{
#if defined(ARMNN_TF_LITE_DELEGATE)
return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
@@ -513,12 +522,6 @@ int main(int argc, const char* argv[])
return EXIT_FAILURE;
#endif
}
- #if defined(ARMNN_TF_LITE_PARSER)
- return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
- #else
- ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
- return EXIT_FAILURE;
- #endif
}
else
{
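The inference loop above now stores the TfLiteStatus returned by Invoke(), though the hunk itself does not act on it. As an illustrative sketch only (reusing the surrounding function's variables), the stored status could be checked per iteration like this:

    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference and bail out on the first failed iteration.
        if (tfLiteInterpreter->Invoke() != kTfLiteOk)
        {
            ARMNN_LOG(fatal) << "TfLite interpreter failed to invoke on iteration " << x;
            return EXIT_FAILURE;
        }
        // ... print the output tensors as in the hunks above ...
    }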
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index a30ce57147..a19eaa9346 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -14,6 +14,13 @@ struct ExecuteNetworkParams
{
using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
+ enum class TfLiteExecutor
+ {
+ ArmNNTfLiteParser,
+ ArmNNTfLiteDelegate,
+ TfliteInterpreter
+ };
+
std::string m_CachedNetworkFilePath;
std::vector<armnn::BackendId> m_ComputeDevices;
bool m_DequantizeOutput;
@@ -47,6 +54,7 @@ struct ExecuteNetworkParams
int m_TuningLevel;
std::string m_TuningPath;
std::string m_MLGOTuningFilePath;
+ TfLiteExecutor m_TfLiteExecutor;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index ba28dd0173..62057eaef2 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -177,10 +177,6 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"tensorflow-text.",
cxxopts::value<std::string>())
- ("D,armnn-tflite-delegate",
- "enable Arm NN TfLite delegate",
- cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"))
-
("m,model-path",
"Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
@@ -271,7 +267,19 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"The type of the output tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined outputs. "
"Accepted values (float, int or qasymm8).",
- cxxopts::value<std::string>());
+ cxxopts::value<std::string>())
+
+ ("T,tflite-executor",
+ "Set the executor for the tflite model: parser, delegate, tflite"
+ "parser is the ArmNNTfLiteParser, "
+ "delegate is the ArmNNTfLiteDelegate, "
+ "tflite is the TfliteInterpreter",
+ cxxopts::value<std::string>()->default_value("parser"))
+
+ ("D,armnn-tflite-delegate",
+ "Enable Arm NN TfLite delegate. "
+ "This option is depreciated please use tflite-executor instead",
+ cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"));
m_CxxOptions.add_options("c) Optimization")
("bf16-turbo-mode",
@@ -409,6 +417,36 @@ void ProgramOptions::ParseOptions(int ac, const char* av[])
m_ExNetParams.m_InputTensorDataFilePaths.empty();
m_ExNetParams.m_DynamicBackendsPath = m_RuntimeOptions.m_DynamicBackendsPath;
+
+ std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);
+
+ if (tfliteExecutor.size() == 0 || tfliteExecutor == "parser")
+ {
+ m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser;
+ }
+ else if (tfliteExecutor == "delegate")
+ {
+ m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
+ }
+ else if (tfliteExecutor == "tflite")
+ {
+ m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter;
+ }
+ else
+ {
+ ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
+ throw armnn::InvalidArgumentException ("Invalid tflite-executor option");
+ }
+
+ if (m_ExNetParams.m_EnableDelegate)
+ {
+ m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
+ ARMNN_LOG(info) << fmt::format("armnn-tflite-delegate option is being depreciated, "
+ "please use tflite-executor instead.");
+ }
+
+
+
// Parse input tensor shape from the string we got from the command-line.
std::vector<std::string> inputTensorShapesVector =
ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");
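The option parsing above maps the new --tflite-executor string onto the TfLiteExecutor enum and lets the deprecated -D/--armnn-tflite-delegate flag override it. A condensed sketch of that mapping (hypothetical helper name):

    #include <string>
    #include <armnn/Exceptions.hpp>
    #include "ExecuteNetworkParams.hpp"

    ExecuteNetworkParams::TfLiteExecutor SelectExecutor(const std::string& value, bool enableDelegateFlag)
    {
        if (enableDelegateFlag)                   // deprecated -D flag still forces the delegate
        {
            return ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
        }
        if (value.empty() || value == "parser")   // default executor
        {
            return ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser;
        }
        if (value == "delegate")
        {
            return ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
        }
        if (value == "tflite")
        {
            return ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter;
        }
        throw armnn::InvalidArgumentException("Invalid tflite-executor option");
    }

Passing --tflite-executor tflite therefore selects TfliteInterpreter and runs the model on stock TensorFlow Lite without registering the Arm NN delegate, the new path added to TfLiteDelegateMainImpl above.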
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 79962623cb..6bfad067ca 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -335,7 +335,8 @@ public:
using DataType = TDataType;
using Params = InferenceModelInternal::Params;
using QuantizationParams = InferenceModelInternal::QuantizationParams;
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer
+ = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
struct CommandLineOptions
{
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 5ec744ca7e..0cc6c3bdca 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -110,7 +110,8 @@ template <typename TModel>
class InferenceModelTestCase : public IInferenceTestCase
{
public:
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 3d6dae335a..79700d991b 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -26,7 +26,8 @@ namespace armnn
namespace test
{
-using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
template <typename TTestCaseDatabase, typename TModel>
ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
@@ -66,6 +67,14 @@ struct ClassifierResultProcessor
});
}
+ void operator()(const std::vector<int8_t>& values)
+ {
+ SortPredictions(values, [](int8_t value)
+ {
+ return value;
+ });
+ }
+
void operator()(const std::vector<uint8_t>& values)
{
auto& scale = m_Scale;
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 2afd941636..d902d23d86 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -52,6 +52,16 @@ auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
[](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
}
+
+template<>
+auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+
+
template<>
auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
const float& quantizationScale,
@@ -130,6 +140,15 @@ void TensorPrinter::operator()(const std::vector<uint8_t>& values)
}
}
+void TensorPrinter::operator()(const std::vector<int8_t>& values)
+{
+ ForEachValue(values, [](int8_t value)
+ {
+ printf("%d ", value);
+ });
+ WriteToFile(values);
+}
+
void TensorPrinter::operator()(const std::vector<int>& values)
{
ForEachValue(values, [](int value)
@@ -170,7 +189,8 @@ void TensorPrinter::WriteToFile(const std::vector<T>& values)
}
}
-using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
using QuantizationParams = std::pair<float, int32_t>;
void PopulateTensorWithData(TContainer& tensorData,
@@ -212,6 +232,12 @@ void PopulateTensorWithData(TContainer& tensorData,
ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
}
+ else if (dataTypeStr.compare("qsymms8") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
+ }
else if (dataTypeStr.compare("qasymm8") == 0)
{
tensorData = readFromFile ?
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 742f968a7a..d92c17c5e5 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -34,6 +34,8 @@ struct TensorPrinter
void operator()(const std::vector<int>& values);
+ void operator()(const std::vector<int8_t>& values);
+
private:
template<typename Container, typename Delegate>
void ForEachValue(const Container& c, Delegate delegate);
@@ -48,7 +50,8 @@ private:
bool m_DequantizeOutput;
};
-using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
using QuantizationParams = std::pair<float, int32_t>;
void PopulateTensorWithData(TContainer& tensorData,