about summary refs log tree commit diff
diff options
context:
space:
mode:
author    David Monahan <david.monahan@arm.com>  2021-11-03 12:56:41 +0000
committer Colm Donelan <colm.donelan@arm.com>    2021-11-03 14:06:55 +0000
commit    67cc5fc08da79933361e7cd0af3b6452d2424a61 (patch)
tree      fba3d2f07ef09c3a8d0ddfd0cf9112d72e648f01
parent    2ea38c7f56d57a4fdcf709c9d61b7bdfab4ebfd9 (diff)
download  armnn-67cc5fc08da79933361e7cd0af3b6452d2424a61.tar.gz
Revert "IVGCVSW-6359 Added support for Float16 (Half) to Execute Network"
This reverts commit 2d9956162dd002a41f7fb4fa6753195d33524c7f.

Reason for revert: After some discussion, this does technically implement
Float16 support for ExecuteNetwork, but not in a way which matches most use
cases and is likely to cause issues in the future. Reverting for now.

Change-Id: I4ce6de6879216e694631f5dc68e46fb793fae0a9
-rw-r--r--include/armnnUtils/TContainer.hpp13
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp15
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp4
-rw-r--r--tests/InferenceTest.inl8
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp29
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp2
6 files changed, 7 insertions, 64 deletions
diff --git a/include/armnnUtils/TContainer.hpp b/include/armnnUtils/TContainer.hpp
index d2a868ac18..a55f9df488 100644
--- a/include/armnnUtils/TContainer.hpp
+++ b/include/armnnUtils/TContainer.hpp
@@ -6,20 +6,15 @@
#pragma once
#include <armnn/TypesUtils.hpp>
-#include <Half.hpp>
#include <mapbox/variant.hpp>
namespace armnnUtils
{
-// Standard declaration of TContainer used by ArmNN
-// Changes to this declaration constitute an api/abi break, new types should be added as a separate declaration and
-// merged on the next planned api/abi update.
-using TContainer = mapbox::util::variant<std::vector<float>,
- std::vector<int>,
- std::vector<uint8_t>,
- std::vector<int8_t>,
- std::vector<armnn::Half>>;
+// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
+// defining your own.
+ using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
} // namespace armnnUtils
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 0d5271158b..a0a08d31b0 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -12,7 +12,6 @@
#include <armnnUtils/Filesystem.hpp>
#include <armnnUtils/TContainer.hpp>
#include <InferenceTest.hpp>
-#include <Half.hpp>
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
@@ -485,7 +484,7 @@ int MainImpl(const ExecuteNetworkParams& params,
armnn::DataType type = model.GetOutputBindingInfo(outputIdx).second.GetDataType();
switch (type)
{
- // --output-type only supports float, float16, int, qasymms8 or qasymmu8.
+ // --output-type only supports float, int, qasymms8 or qasymmu8.
case armnn::DataType::Float32:
if (params.m_OutputTypes[outputIdx].compare("float") != 0)
{
@@ -494,14 +493,6 @@ int MainImpl(const ExecuteNetworkParams& params,
". This may cause unexpected problems or random failures.";
}
break;
- case armnn::DataType::Float16:
- if (params.m_OutputTypes[outputIdx].compare("float16") != 0)
- {
- ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Float16. The " <<
- "corresponding --output-type is " << params.m_OutputTypes[outputIdx] <<
- ". This may cause unexpected problems or random failures.";
- }
- break;
case armnn::DataType::QAsymmU8:
if (params.m_OutputTypes[outputIdx].compare("qasymmu8") != 0)
{
@@ -539,10 +530,6 @@ int MainImpl(const ExecuteNetworkParams& params,
{
outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
}
- else if (params.m_OutputTypes[i].compare("float16") == 0)
- {
- outputDataContainers.push_back(std::vector<armnn::Half>(model.GetOutputSize(i)));
- }
else if (params.m_OutputTypes[i].compare("int") == 0)
{
outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 25dbe91455..8ee66cf64b 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -294,13 +294,13 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
("y,input-type",
"The type of the input tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, float16, int, qasymms8 or qasymmu8).",
+ "Accepted values (float, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("z,output-type",
"The type of the output tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined outputs. "
- "Accepted values (float, float16, int, qasymms8 or qasymmu8).",
+ "Accepted values (float, int, qasymms8 or qasymmu8).",
cxxopts::value<std::string>())
("T,tflite-executor",
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 94dbfe78b8..b6087c5e5a 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -67,14 +67,6 @@ struct ClassifierResultProcessor
});
}
- void operator()(const std::vector<armnn::Half>& values)
- {
- SortPredictions(values, [](armnn::Half value)
- {
- return value;
- });
- }
-
void operator()(const std::vector<int8_t>& values)
{
SortPredictions(values, [](int8_t value)
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 00ed55caaf..6c74aaa6ed 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -34,15 +34,6 @@ auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
}
template<>
-auto ParseDataArray<armnn::DataType::Float16>(std::istream& stream)
-{
- return ParseArrayImpl<armnn::Half>(stream, [](const std::string& s)
- {
- return armnn::Half(std::stof(s));
- });
-}
-
-template<>
auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
{
return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
@@ -148,20 +139,6 @@ void TensorPrinter::operator()(const std::vector<float>& values)
WriteToFile(values);
}
-void TensorPrinter::operator()(const std::vector<armnn::Half>& values)
-{
- if (m_PrintToConsole)
- {
- std::cout << m_OutputBinding << ": ";
- ForEachValue(values, [](armnn::Half value)
- {
- printf("%f ", static_cast<float>(value));
- });
- printf("\n");
- }
- WriteToFile(values);
-}
-
void TensorPrinter::operator()(const std::vector<uint8_t>& values)
{
if(m_DequantizeOutput)
@@ -284,12 +261,6 @@ void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
}
}
- else if (dataTypeStr.compare("float16") == 0)
- {
- tensorData = readFromFile ?
- ParseDataArray<armnn::DataType::Float16>(inputTensorFile) :
- GenerateDummyTensorData<armnn::DataType::Float16>(numElements);
- }
else if (dataTypeStr.compare("int") == 0)
{
tensorData = readFromFile ?
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 8cd5c5b310..bc2868ab35 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -36,8 +36,6 @@ struct TensorPrinter
void operator()(const std::vector<int8_t>& values);
- void operator()(const std::vector<armnn::Half>& values);
-
private:
template<typename Container, typename Delegate>
void ForEachValue(const Container& c, Delegate delegate);