path: root/tests/ExecuteNetwork
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r-- tests/ExecuteNetwork/ArmNNExecutor.cpp                | 155
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetworkParams.cpp         |   2
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetworkParams.hpp         |   7
-rw-r--r-- tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp |  12
-rw-r--r-- tests/ExecuteNetwork/FileComparisonExecutor.cpp       |  95
-rw-r--r-- tests/ExecuteNetwork/TfliteExecutor.cpp               |  96
6 files changed, 334 insertions(+), 33 deletions(-)
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 4518f1426f..ece3dafea4 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -7,27 +7,157 @@
#include "ArmNNExecutor.hpp"
#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
-#include <armnn/IAsyncExecutionCallback.hpp>
#include <AsyncExecutionCallback.hpp>
-
-
+#include <armnn/IAsyncExecutionCallback.hpp>
+#if defined(ARMNN_SERIALIZER)
+#include <armnnSerializer/ISerializer.hpp>
+#endif
using namespace armnn;
using namespace std::chrono;
+#if defined(ARMNN_SERIALIZER)
+/**
+ * Given a reference to an INetwork and a target directory, serialize the network to a file
+ * called "<timestamp>_network.armnn"
+ *
+ * @param network The network to serialize.
+ * @param dumpDir The target directory.
+ * @return the full path to the serialized file.
+ */
+std::string SerializeNetwork(const armnn::INetwork& network, const std::string& dumpDir)
+{
+ if (dumpDir.empty())
+ {
+ throw InvalidArgumentException("An output directory must be specified.");
+ }
+ fs::path outputDirectory(dumpDir);
+ if (!exists(outputDirectory))
+ {
+ throw InvalidArgumentException(
+ fmt::format("The specified directory does not exist: {}", outputDirectory.c_str()));
+ }
+ auto serializer(armnnSerializer::ISerializer::Create());
+ // Serialize the Network
+ serializer->Serialize(network);
+
+ fs::path fileName;
+ fileName += dumpDir;
+ // Use a timestamp to give the serialized network file a unique name.
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
+ {
+ std::stringstream ss;
+ ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec) << "_network.armnn";
+ fileName += ss.str();
+ }
+ else
+ {
+ // This is incredibly unlikely but just in case.
+ throw RuntimeException("clock_gettime, CLOCK_MONOTONIC_RAW returned a non zero result.");
+ }
+
+ // Save serialized network to a file
+ std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
+ auto serialized = serializer->SaveSerializedToStream(serializedFile);
+ if (!serialized)
+ {
+ throw RuntimeException(fmt::format("An error occurred when serializing to file {}", fileName.c_str()));
+ }
+ serializedFile.flush();
+ serializedFile.close();
+ return fileName;
+}
+
+/**
+ * Given a reference to an optimized network and a target directory, serialize the network in .dot file format to
+ * a file called "<timestamp>_optimized_networkgraph.dot"
+ *
+ * @param optimizedNetwork The optimized network to serialize.
+ * @param dumpDir The target directory.
+ * @return the full path to the serialized file.
+ */
+std::string SerializeNetworkToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork, const std::string& dumpDir)
+{
+ if (dumpDir.empty())
+ {
+ throw InvalidArgumentException("An output directory must be specified.");
+ }
+ fs::path outputDirectory(dumpDir);
+ if (!exists(outputDirectory))
+ {
+ throw InvalidArgumentException(
+ fmt::format("The specified directory does not exist: {}", outputDirectory.c_str()));
+ }
+
+ fs::path fileName;
+ fileName += dumpDir;
+ // Use a timestamp to give the .dot file a unique name.
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
+ {
+ std::stringstream ss;
+ ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec) << "_optimized_networkgraph.dot";
+ fileName += ss.str();
+ }
+ else
+ {
+ // This is incredibly unlikely but just in case.
+ throw RuntimeException("clock_gettime, CLOCK_MONOTONIC_RAW returned a non zero result.");
+ }
+
+ // Write the network graph to a dot file.
+ std::ofstream fileStream;
+ fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
+ if (!fileStream.good())
+ {
+ throw RuntimeException(fmt::format("An error occurred when creating {}", fileName.c_str()));
+ }
+
+ if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
+ {
+ throw RuntimeException(fmt::format("An error occurred when serializing to file {}", fileName.c_str()));
+ }
+ fileStream.flush();
+ fileStream.close();
+ return fileName;
+}
+#endif
+
ArmNNExecutor::ArmNNExecutor(const ExecuteNetworkParams& params, armnn::IRuntime::CreationOptions runtimeOptions)
-: m_Params(params)
+ : m_Params(params)
{
- runtimeOptions.m_EnableGpuProfiling = params.m_EnableProfiling;
+ runtimeOptions.m_EnableGpuProfiling = params.m_EnableProfiling;
runtimeOptions.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
// Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all ArmNNExecutor
// instances so the RuntimeOptions cannot be altered for different ArmNNExecutor instances.
m_Runtime = GetRuntime(runtimeOptions);
- auto parser = CreateParser();
+ auto parser = CreateParser();
auto network = parser->CreateNetwork(m_Params);
- auto optNet = OptimizeNetwork(network.get());
+ auto optNet = OptimizeNetwork(network.get());
+ // If the user has asked for the network to be serialized, write out the .armnn and .dot files.
+ if (params.m_SerializeToArmNN)
+ {
+#if defined(ARMNN_SERIALIZER)
+ // .armnn first.
+ // This could throw multiple exceptions if the directory cannot be created or the file cannot be written.
+ std::string targetDirectory(armnnUtils::Filesystem::CreateDirectory("/ArmNNSerializeNetwork"));
+ std::string fileName;
+ fileName = SerializeNetwork(*network, targetDirectory);
+ ARMNN_LOG(info) << "The pre-optimized network has been serialized to: " << fileName;
+ // and the .dot file.
+ // Most of the possible exceptions should have already occurred with the .armnn file.
+ fileName = SerializeNetworkToDotFile(*optNet, targetDirectory);
+ ARMNN_LOG(info) << "The optimized network has been serialized to: " << fileName;
+#else
+ ARMNN_LOG(info) << "Arm NN has not been built with ARMNN_SERIALIZER enabled.";
+#endif
+ }
m_IOInfo = GetIOInfo(optNet.get());
armnn::ProfilingDetailsMethod profilingDetailsMethod = ProfilingDetailsMethod::Undefined;
@@ -97,6 +227,9 @@ ArmNNExecutor::~ArmNNExecutor()
{
profiler->Print(std::cout);
}
+
+ // We're finished with the network.
+ m_Runtime->UnloadNetwork(m_NetworkId);
}
void ArmNNExecutor::ExecuteAsync()
@@ -176,6 +309,12 @@ void ArmNNExecutor::ExecuteAsync()
void ArmNNExecutor::ExecuteSync()
{
+ // If we've only been asked to serialize the networks, don't execute the inference.
+ if (m_Params.m_SerializeToArmNN)
+ {
+ ARMNN_LOG(info) << "serialize-to-armnn has been specified. No inference will be executed.";
+ return;
+ }
for (size_t x = 0; x < m_Params.m_Iterations; x++)
{
std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
@@ -800,6 +939,7 @@ armnn::BindingPointInfo ArmNNExecutor::TfliteParser::GetOutputBindingPointInfo(s
#if defined(ARMNN_ONNX_PARSER)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
ArmNNExecutor::OnnxParser::OnnxParser() : m_Parser(armnnOnnxParser::IOnnxParser::Create()){}
armnn::INetworkPtr ArmNNExecutor::OnnxParser::CreateNetwork(const ExecuteNetworkParams& params)
@@ -843,4 +983,5 @@ armnn::BindingPointInfo ArmNNExecutor::OnnxParser::GetOutputBindingPointInfo(siz
{
return m_Parser->GetNetworkOutputBindingInfo(outputName);
}
+ARMNN_NO_DEPRECATE_WARN_END
#endif
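
The .armnn file written out by SerializeNetwork above is a standard Arm NN serialized network, so it can be read back through the deserializer. A minimal sketch follows, assuming a build with ARMNN_SERIALIZER enabled; the file path is hypothetical and this code is not part of the change:

// Minimal sketch: load a network produced by --serialize-to-armnn back into Arm NN.
#include <armnn/ArmNN.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
#include <fstream>
#include <iterator>
#include <vector>

int main()
{
    // Read the serialized file into memory (hypothetical path).
    std::ifstream file("ArmNNSerializeNetwork/1700000000_0_network.armnn", std::ios::binary);
    std::vector<uint8_t> binaryContent((std::istreambuf_iterator<char>(file)),
                                       std::istreambuf_iterator<char>());

    // Recreate the INetwork from the binary content.
    auto deserializer = armnnDeserializer::IDeserializer::Create();
    armnn::INetworkPtr network = deserializer->CreateNetworkFromBinary(binaryContent);

    // Optimize and load it into a runtime as usual.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
                                                         { armnn::Compute::CpuRef },
                                                         runtime->GetDeviceSpec());
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}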
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 4f5a290a0d..ccd64f531e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -109,7 +109,7 @@ void ExecuteNetworkParams::ValidateParams()
}
}
-#if defined(ARMNN_TFLITE_DELEGATE)
+#if defined(ARMNN_TFLITE_DELEGATE)||defined(ARMNN_TFLITE_OPAQUE_DELEGATE)
/**
* A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
*
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index ffcb4f482c..5eaae24b5e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,7 +8,7 @@
#include <armnn/BackendId.hpp>
#include <armnn/Tensor.hpp>
-#if defined(ARMNN_TFLITE_DELEGATE)
+#if defined(ARMNN_TFLITE_DELEGATE)||defined(ARMNN_TFLITE_OPAQUE_DELEGATE)
#include <DelegateOptions.hpp>
#endif
@@ -67,10 +67,11 @@ struct ExecuteNetworkParams
std::string m_ComparisonFile;
std::vector<armnn::BackendId> m_ComparisonComputeDevices;
bool m_CompareWithTflite;
+ bool m_SerializeToArmNN;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();
-#if defined(ARMNN_TFLITE_DELEGATE)
+#if defined(ARMNN_TFLITE_DELEGATE)||defined(ARMNN_TFLITE_OPAQUE_DELEGATE)
/// A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
armnnDelegate::DelegateOptions ToDelegateOptions() const;
#endif
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 5c1337f769..87b38c5f78 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -216,7 +216,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("m,model-path",
"Path to model file, e.g. .armnn, .tflite, .onnx. "
- "DEPRECATED: .pb and .prototxt model files no longer load and are deprecated.",
+ "DEPRECATED: .pb and .prototxt model files no longer loaded and are deprecated."
+ "DEPRECATED: .onnx model files will no longer loaded from 24.08 onwards.",
cxxopts::value<std::string>(m_ExNetParams.m_ModelPath));
m_CxxOptions.add_options("b) Ordering")
@@ -237,7 +238,8 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
("P, thread-pool-size",
- "Run the network using the Arm NN thread pool with the number of threads provided. ",
+ "Run the network using the Arm NN thread pool with the number of threads provided. "
+ "DECRECATED: The asynchronous execution interface will be removed in 24.08",
cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))
("d,input-tensor-data",
@@ -351,6 +353,12 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"Perform an per byte root mean square error calculation of the output of the inference with"
" the tflite ref model.",
cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
+ ->implicit_value("true"))
+ ("serialize-to-armnn",
+ "Serialize the loaded network to an .armnn file. This option will also serialize the optimized network"
+ " in dot format. This option only works with both the TfLite parser and the Arm NN serializer"
+ " enabled in the build. An inference will NOT be executed.",
+ cxxopts::value<bool>(m_ExNetParams.m_SerializeToArmNN)->default_value("false")
->implicit_value("true"));
m_CxxOptions.add_options("d) Optimization")
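
For context, the new option replaces an inference run rather than accompanying one, so a typical invocation looks something like the line below (hypothetical model path; the usual -c/--compute backend selection is assumed):

ExecuteNetwork -m my_model.tflite -c CpuRef --serialize-to-armnn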
diff --git a/tests/ExecuteNetwork/FileComparisonExecutor.cpp b/tests/ExecuteNetwork/FileComparisonExecutor.cpp
index 844004bf71..26d535a958 100644
--- a/tests/ExecuteNetwork/FileComparisonExecutor.cpp
+++ b/tests/ExecuteNetwork/FileComparisonExecutor.cpp
@@ -1,14 +1,18 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FileComparisonExecutor.hpp"
#include <NetworkExecutionUtils/NetworkExecutionUtils.hpp>
+#include <armnn/Numpy.hpp>
#include <algorithm>
#include <ghc/filesystem.hpp>
#include <iterator>
+// Maximum file size of 1Mb (1048576 bytes).
+constexpr uint32_t MAX_FILE_SIZE = 1048576;
+
using namespace armnn;
/**
@@ -173,11 +177,10 @@ Tensor ReadTensorFromFile(const std::string fileName)
{
throw FileNotFoundException("The file \"" + fileName + "\" could not be found.");
}
- // The format we are reading in is based on NetworkExecutionUtils::WriteToFile. This could potentially
- // be an enormous tensor. We'll limit what we can read in to 1Mb.
- std::uintmax_t maxFileSize = 1048576;
- std::uintmax_t fileSize = ghc::filesystem::file_size(fileName);
- if (fileSize > maxFileSize)
+ // The format we are reading in is based on NetworkExecutionUtils::WriteToFile.
+ // This could potentially be an enormous tensor.
+ std::uintmax_t fileSize = ghc::filesystem::file_size(fileName);
+ if (fileSize > MAX_FILE_SIZE)
{
throw InvalidArgumentException("The file \"" + fileName + "\" exceeds max size of 1 Mb.");
}
@@ -245,6 +248,75 @@ Tensor ReadTensorFromFile(const std::string fileName)
return result;
}
+/**
+ * Open the given file and read the data out of it to construct a Tensor. This could throw FileNotFoundException
+ * or InvalidArgumentException
+ *
+ * @param fileName the file to be read.
+ * @return a populated tensor.
+ */
+Tensor ReadTensorFromNumpyFile(const std::string fileName)
+{
+ if (!ghc::filesystem::exists(fileName))
+ {
+ throw FileNotFoundException("The file \"" + fileName + "\" could not be found.");
+ }
+ // The file is expected to be in numpy (.npy) format. This could potentially
+ // be an enormous tensor. We'll limit what we can read in to 1Mb.
+ std::uintmax_t fileSize = ghc::filesystem::file_size(fileName);
+ if (fileSize > MAX_FILE_SIZE)
+ {
+ throw InvalidArgumentException("The file \"" + fileName + "\" exceeds max size of 1 Mb.");
+ }
+
+ std::ifstream ifStream(fileName, std::ios::binary);
+ armnnNumpy::HeaderInfo headerInfo;
+ armnnNumpy::Header header;
+
+ CreateHeaderInfo(ifStream, headerInfo);
+ CreateHeader(ifStream, headerInfo, header);
+ uint32_t numElements = armnnNumpy::getNumElements(header);
+
+ switch (armnnNumpy::getArmNNDataType(header.m_DescrString))
+ {
+ case DataType::Float32: {
+ float* floats = new float[numElements];
+ armnnNumpy::ReadData<float>(ifStream, floats, numElements);
+ TensorInfo info({ numElements, 1, 1, 1 }, DataType::Float32);
+ return Tensor(info, floats);
+ }
+ case DataType::Signed32: {
+ int* ints = new int[numElements];
+ armnnNumpy::ReadData<int>(ifStream, ints, numElements);
+ TensorInfo info({ numElements, 1, 1, 1 }, DataType::Signed32);
+ return Tensor(info, ints);
+ }
+ case DataType::QSymmS8: {
+ int8_t* ints = new int8_t[numElements];
+ armnnNumpy::ReadData<int8_t>(ifStream, ints, numElements);
+ TensorInfo info({ numElements, 1, 1, 1 }, DataType::QSymmS8);
+ return Tensor(info, ints);
+ }
+ case DataType::QAsymmS8: {
+ int8_t* ints = new int8_t[numElements];
+ armnnNumpy::ReadData<int8_t>(ifStream, ints, numElements);
+ TensorInfo info({ numElements, 1, 1, 1 }, DataType::QAsymmS8);
+ return Tensor(info, ints);
+ }
+ case DataType::QAsymmU8: {
+ uint8_t* ints = new uint8_t[numElements];
+ armnnNumpy::ReadData<uint8_t>(ifStream, ints, numElements);
+ TensorInfo info({ numElements, 1, 1, 1 }, DataType::QAsymmU8);
+ return Tensor(info, ints);
+ }
+ default:
+ throw InvalidArgumentException("The tensor data could not be read from \"" + fileName + "\"");
+ }
+
+ Tensor result;
+ return result;
+}
+
FileComparisonExecutor::FileComparisonExecutor(const ExecuteNetworkParams& params)
: m_Params(params)
{}
@@ -284,7 +356,16 @@ std::vector<const void*> FileComparisonExecutor::Execute()
std::vector<const void*> results;
for (auto file : fileNames)
{
- Tensor t = ReadTensorFromFile(file);
+ Tensor t;
+ if (file.find(".npy") == std::string::npos)
+ {
+ t = ReadTensorFromFile(file);
+ }
+ else
+ {
+ t = ReadTensorFromNumpyFile(file);
+ }
+
outputs.push_back({ 0, Tensor(t.GetInfo(), t.GetMemoryArea()) });
results.push_back(t.GetMemoryArea());
}
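
The armnnNumpy helpers used by ReadTensorFromNumpyFile can also be driven directly. Below is a minimal sketch, assuming the same armnn/Numpy.hpp API called above, that reads a float32 .npy file into a std::vector so buffer ownership is explicit rather than relying on a raw new[]; the function name and file layout assumptions are hypothetical:

// Minimal sketch: read a float32 .npy file with the armnnNumpy helpers used above.
#include <armnn/Numpy.hpp>
#include <armnn/Tensor.hpp>
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

armnn::Tensor ReadFloat32Npy(const std::string& fileName, std::vector<float>& storage)
{
    std::ifstream ifStream(fileName, std::ios::binary);
    armnnNumpy::HeaderInfo headerInfo;
    armnnNumpy::Header header;
    armnnNumpy::CreateHeaderInfo(ifStream, headerInfo);
    armnnNumpy::CreateHeader(ifStream, headerInfo, header);

    // Size the vector from the header and read the payload into it.
    uint32_t numElements = armnnNumpy::getNumElements(header);
    storage.resize(numElements);
    armnnNumpy::ReadData<float>(ifStream, storage.data(), numElements);

    // Mirror the flat { N, 1, 1, 1 } shape used by ReadTensorFromNumpyFile above.
    armnn::TensorInfo info({ numElements, 1, 1, 1 }, armnn::DataType::Float32);
    return armnn::Tensor(info, storage.data());
}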
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index 433d538b78..7a4ff55237 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -9,7 +9,9 @@
#include <tensorflow/lite/core/c/c_api.h>
#include "TfliteExecutor.hpp"
-#include "tensorflow/lite/kernels/kernel_util.h"
+#include <tensorflow/lite/kernels/kernel_util.h>
+
+#include <../delegate/common/src/DelegateUtils.hpp>
#include <chrono>
#include <string>
@@ -246,8 +248,10 @@ std::vector<const void *> TfLiteExecutor::Execute()
TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
// If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
FILE* outputTensorFile = stdout;
+ bool isNumpyOutput = false;
if (!m_Params.m_OutputTensorFiles.empty())
{
+ isNumpyOutput = m_Params.m_OutputTensorFiles[outputIndex].find(".npy") != std::string::npos;
outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
if (outputTensorFile == NULL)
{
@@ -256,29 +260,43 @@ std::vector<const void *> TfLiteExecutor::Execute()
}
else
{
- ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x + 1
+ ARMNN_LOG(info) << "Writing output " << outputIndex << " of iteration: " << x + 1
<< " to file: '" << m_Params.m_OutputTensorFiles[outputIndex] << "'";
}
}
+
long outputSize = 1;
for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
{
outputSize *= outputDims->data[dim];
}
+ armnn::TensorShape shape(static_cast<unsigned int>(outputDims->size), outputDims->data);
+ armnn::DataType dataType(GetDataType(*m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)));
+
std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
{
-
case kTfLiteFloat32:
{
auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(
tfLiteDelegateOutputId);
results.push_back(tfLiteDelegateOutputData);
- for (int i = 0; i < outputSize; ++i)
+ if (isNumpyOutput)
+ {
+ armnnNumpy::WriteToNumpyFile(m_Params.m_OutputTensorFiles[outputIndex],
+ tfLiteDelegateOutputData,
+ outputSize,
+ dataType,
+ shape);
+ }
+ else
{
- fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
+ }
}
break;
}
@@ -287,10 +305,23 @@ std::vector<const void *> TfLiteExecutor::Execute()
auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(
tfLiteDelegateOutputId);
results.push_back(tfLiteDelegateOutputData);
- for (int i = 0; i < outputSize; ++i)
+
+ if (isNumpyOutput)
+ {
+ armnnNumpy::WriteToNumpyFile(m_Params.m_OutputTensorFiles[outputIndex],
+ tfLiteDelegateOutputData,
+ outputSize,
+ dataType,
+ shape);
+ }
+ else
{
- fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+ }
}
+
break;
}
case kTfLiteUInt8:
@@ -298,10 +329,23 @@ std::vector<const void *> TfLiteExecutor::Execute()
auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(
tfLiteDelegateOutputId);
results.push_back(tfLiteDelegateOutputData);
- for (int i = 0; i < outputSize; ++i)
+
+ if (isNumpyOutput)
{
- fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+ armnnNumpy::WriteToNumpyFile(m_Params.m_OutputTensorFiles[outputIndex],
+ tfLiteDelegateOutputData,
+ outputSize,
+ dataType,
+ shape);
}
+ else
+ {
+ for (int i = 0; i < outputSize; ++i)
+ {
+ fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+ }
+ }
+
break;
}
case kTfLiteInt8:
@@ -309,10 +353,23 @@ std::vector<const void *> TfLiteExecutor::Execute()
auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(
tfLiteDelegateOutputId);
results.push_back(tfLiteDelegateOutputData);
- for (int i = 0; i < outputSize; ++i)
+
+ if (isNumpyOutput)
+ {
+ armnnNumpy::WriteToNumpyFile(m_Params.m_OutputTensorFiles[outputIndex],
+ tfLiteDelegateOutputData,
+ outputSize,
+ dataType,
+ shape);
+ }
+ else
{
- fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+ for (int i = 0; i < outputSize; ++i)
+ {
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
+ }
}
+
break;
}
case kTfLiteBool:
@@ -320,8 +377,21 @@ std::vector<const void *> TfLiteExecutor::Execute()
auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<bool>(
tfLiteDelegateOutputId);
results.push_back(tfLiteDelegateOutputData);
- for (int i = 0; i < outputSize; ++i) {
- fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+
+ if (isNumpyOutput)
+ {
+ armnnNumpy::WriteToNumpyFile(m_Params.m_OutputTensorFiles[outputIndex],
+ tfLiteDelegateOutputData,
+ outputSize,
+ dataType,
+ shape);
+ }
+ else
+ {
+ for (int i = 0; i < outputSize; ++i)
+ {
+ fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
+ }
}
break;
}
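
To round-trip the new .npy support, a reference file that FileComparisonExecutor can read back can be produced with the same helper used in the output paths above. A minimal sketch, assuming the armnnNumpy::WriteToNumpyFile signature implied by the calls in this diff (file name, data pointer, element count, data type, shape); the file name and values are hypothetical:

// Minimal sketch: write a small float32 tensor to a .npy file using the helper shown above.
#include <armnn/Numpy.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

int main()
{
    std::vector<float> values = { 0.1f, 0.2f, 0.3f, 0.4f };
    armnn::TensorShape shape({ 1, 1, 1, 4 });
    armnnNumpy::WriteToNumpyFile("reference_output.npy",
                                 values.data(),
                                 static_cast<long>(values.size()),
                                 armnn::DataType::Float32,
                                 shape);
    return 0;
}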