author     David Monahan <David.Monahan@arm.com>  2021-11-01 10:16:37 +0000
committer  David Monahan <david.monahan@arm.com>  2021-11-01 16:32:24 +0000
commit     2d9956162dd002a41f7fb4fa6753195d33524c7f (patch)
tree       35b88472ff1bc5374c40365785c626cfb1ddec2c
parent     6b9eba2f785093747f04af245da0cec7aca3931c (diff)
download   armnn-2d9956162dd002a41f7fb4fa6753195d33524c7f.tar.gz
IVGCVSW-6359 Added support for Float16 (Half) to Execute Network
* Allows the user to specify float16 as a datatype
* Does not contain support for float16 on the TfLiteDelegate via ExecuteNetwork

Signed-off-by: David Monahan <David.Monahan@arm.com>
Change-Id: Icba56feedab32662e2cf671cc46ada899cf40c6c
-rw-r--r--  include/armnnUtils/TContainer.hpp                      | 13
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                | 15
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp  |  4
-rw-r--r--  tests/InferenceTest.inl                                |  8
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp  | 29
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp  |  2
6 files changed, 64 insertions(+), 7 deletions(-)
diff --git a/include/armnnUtils/TContainer.hpp b/include/armnnUtils/TContainer.hpp
index a55f9df488..d2a868ac18 100644
--- a/include/armnnUtils/TContainer.hpp
+++ b/include/armnnUtils/TContainer.hpp
@@ -6,15 +6,20 @@
#pragma once
#include <armnn/TypesUtils.hpp>
+#include <Half.hpp>
#include <mapbox/variant.hpp>
namespace armnnUtils
{
-// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
-// defining your own.
- using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
+// Standard declaration of TContainer used by ArmNN
+// Changes to this declaration constitute an api/abi break, new types should be added as a separate declaration and
+// merged on the next planned api/abi update.
+using TContainer = mapbox::util::variant<std::vector<float>,
+ std::vector<int>,
+ std::vector<uint8_t>,
+ std::vector<int8_t>,
+ std::vector<armnn::Half>>;
} // namespace armnnUtils
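For illustration, a minimal sketch of using the extended variant; this is not part of the commit, and assumes armnn::Half is the usual half-precision type with explicit construction from float and implicit conversion back:

#include <armnnUtils/TContainer.hpp>
#include <Half.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Store a half-precision buffer in the variant, as ExecuteNetwork now
    // does for float16 inputs and outputs.
    armnnUtils::TContainer container =
        std::vector<armnn::Half>{ armnn::Half(1.5f), armnn::Half(2.5f) };

    // Retrieve the alternative that was stored; get<T>() throws on mismatch.
    const auto& halves = mapbox::util::get<std::vector<armnn::Half>>(container);
    for (armnn::Half h : halves)
    {
        std::cout << static_cast<float>(h) << " ";
    }
    std::cout << std::endl;
}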
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index a0a08d31b0..0d5271158b 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -12,6 +12,7 @@
#include <armnnUtils/Filesystem.hpp>
#include <armnnUtils/TContainer.hpp>
#include <InferenceTest.hpp>
+#include <Half.hpp>
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
@@ -484,7 +485,7 @@ int MainImpl(const ExecuteNetworkParams& params,
armnn::DataType type = model.GetOutputBindingInfo(outputIdx).second.GetDataType();
switch (type)
{
- // --output-type only supports float, int, qasymms8 or qasymmu8.
+ // --output-type only supports float, float16, int, qasymms8 or qasymmu8.
case armnn::DataType::Float32:
if (params.m_OutputTypes[outputIdx].compare("float") != 0)
{
@@ -493,6 +494,14 @@ int MainImpl(const ExecuteNetworkParams& params,
". This may cause unexpected problems or random failures.";
}
break;
+ case armnn::DataType::Float16:
+ if (params.m_OutputTypes[outputIdx].compare("float16") != 0)
+ {
+ ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Float16. The " <<
+ "corresponding --output-type is " << params.m_OutputTypes[outputIdx] <<
+ ". This may cause unexpected problems or random failures.";
+ }
+ break;
case armnn::DataType::QAsymmU8:
if (params.m_OutputTypes[outputIdx].compare("qasymmu8") != 0)
{
@@ -530,6 +539,10 @@ int MainImpl(const ExecuteNetworkParams& params,
{
outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
}
+ else if (params.m_OutputTypes[i].compare("float16") == 0)
+ {
+ outputDataContainers.push_back(std::vector<armnn::Half>(model.GetOutputSize(i)));
+ }
else if (params.m_OutputTypes[i].compare("int") == 0)
{
outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
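The new branch above sizes a std::vector<armnn::Half> to hold each float16 output. As a hedged sketch (the helper name here is hypothetical, not from the commit), post-processing code that only understands float32 could widen such a buffer like this:

#include <Half.hpp>
#include <algorithm>
#include <vector>

// Hypothetical helper: widen a float16 output buffer to float32 for
// post-processing code that expects float.
std::vector<float> WidenToFloat32(const std::vector<armnn::Half>& halves)
{
    std::vector<float> floats(halves.size());
    std::transform(halves.begin(), halves.end(), floats.begin(),
                   [](armnn::Half h) { return static_cast<float>(h); });
    return floats;
}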
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 8ee66cf64b..25dbe91455 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -294,13 +294,13 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("y,input-type",
"The type of the input tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, int, qasymms8 or qasymmu8).",
+ "Accepted values (float, float16, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("z,output-type",
"The type of the output tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined outputs. "
- "Accepted values (float, int, qasymms8 or qasymmu8).",
+ "Accepted values (float, float16, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("T,tflite-executor",
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index b6087c5e5a..94dbfe78b8 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -67,6 +67,14 @@ struct ClassifierResultProcessor
});
}
+ void operator()(const std::vector<armnn::Half>& values)
+ {
+ SortPredictions(values, [](armnn::Half value)
+ {
+ return value;
+ });
+ }
+
void operator()(const std::vector<int8_t>& values)
{
SortPredictions(values, [](int8_t value)
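ClassifierResultProcessor is a visitor over TContainer, so adding the Half overload lets the variant dispatch half-precision results like any other element type. A minimal sketch of that dispatch, with a simplified functor standing in for the real class:

#include <armnnUtils/TContainer.hpp>
#include <Half.hpp>
#include <iostream>
#include <vector>

// Simplified stand-in for ClassifierResultProcessor: one operator() must
// exist for every alternative the variant can hold, including Half.
struct PrintCount
{
    template <typename T>
    void operator()(const std::vector<T>& values) const
    {
        std::cout << values.size() << " values\n";
    }
};

int main()
{
    armnnUtils::TContainer c = std::vector<armnn::Half>(10, armnn::Half(0.0f));
    mapbox::util::apply_visitor(PrintCount{}, c); // selects the Half overload
}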
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 6c74aaa6ed..00ed55caaf 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -34,6 +34,15 @@ auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
}
template<>
+auto ParseDataArray<armnn::DataType::Float16>(std::istream& stream)
+{
+ return ParseArrayImpl<armnn::Half>(stream, [](const std::string& s)
+ {
+ return armnn::Half(std::stof(s));
+ });
+}
+
+template<>
auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
{
return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
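The Float16 specialisation parses each token with std::stof and then narrows to half precision, so stored values are rounded to the nearest representable float16. A small illustration of that round-trip (not part of the commit):

#include <Half.hpp>
#include <iostream>
#include <string>

int main()
{
    // std::stof yields a float32; the armnn::Half constructor then rounds
    // it to the nearest half-precision value (~3 decimal digits).
    armnn::Half h(std::stof("3.14159"));
    std::cout << static_cast<float>(h) << std::endl; // prints about 3.14062
}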
@@ -139,6 +148,20 @@ void TensorPrinter::operator()(const std::vector<float>& values)
WriteToFile(values);
}
+void TensorPrinter::operator()(const std::vector<armnn::Half>& values)
+{
+ if (m_PrintToConsole)
+ {
+ std::cout << m_OutputBinding << ": ";
+ ForEachValue(values, [](armnn::Half value)
+ {
+ printf("%f ", static_cast<float>(value));
+ });
+ printf("\n");
+ }
+ WriteToFile(values);
+}
+
void TensorPrinter::operator()(const std::vector<uint8_t>& values)
{
if(m_DequantizeOutput)
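The Half overload above prints through static_cast<float> because printf has no half-precision conversion; the float argument is then promoted to double to match %f. A standalone equivalent of just that loop:

#include <Half.hpp>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<armnn::Half> values{ armnn::Half(0.5f), armnn::Half(1.25f) };
    for (armnn::Half v : values)
    {
        // Cast to float; default argument promotion then widens to double.
        printf("%f ", static_cast<float>(v));
    }
    printf("\n");
}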
@@ -261,6 +284,12 @@ void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
}
}
+ else if (dataTypeStr.compare("float16") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::Float16>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::Float16>(numElements);
+ }
else if (dataTypeStr.compare("int") == 0)
{
tensorData = readFromFile ?
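GenerateDummyTensorData<armnn::DataType::Float16> is referenced but not shown in this diff; a hedged guess at the shape of such a generator (illustrative only, not the actual implementation):

#include <Half.hpp>
#include <vector>

// Hypothetical stand-in: fill a float16 buffer of the requested size with a
// fixed placeholder value.
std::vector<armnn::Half> MakeDummyHalfData(unsigned int numElements)
{
    return std::vector<armnn::Half>(numElements, armnn::Half(0.0f));
}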
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index bc2868ab35..8cd5c5b310 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -36,6 +36,8 @@ struct TensorPrinter
void operator()(const std::vector<int8_t>& values);
+ void operator()(const std::vector<armnn::Half>& values);
+
private:
template<typename Container, typename Delegate>
void ForEachValue(const Container& c, Delegate delegate);