diff options
author | David Monahan <david.monahan@arm.com> | 2021-11-03 12:56:41 +0000 |
---|---|---|
committer | Colm Donelan <colm.donelan@arm.com> | 2021-11-03 14:06:55 +0000 |
commit | 67cc5fc08da79933361e7cd0af3b6452d2424a61 (patch) | |
tree | fba3d2f07ef09c3a8d0ddfd0cf9112d72e648f01 /tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp | |
parent | 2ea38c7f56d57a4fdcf709c9d61b7bdfab4ebfd9 (diff) | |
download | armnn-67cc5fc08da79933361e7cd0af3b6452d2424a61.tar.gz |
Revert "IVGCVSW-6359 Added support for Float16 (Half) to Execute Network"
This reverts commit 2d9956162dd002a41f7fb4fa6753195d33524c7f.
Reason for revert: After some discussion, this does technically implement Float16 support for ExecuteNetwork, but not in a way that matches most use cases, and it is likely to cause issues in the future. Reverting for now.
Change-Id: I4ce6de6879216e694631f5dc68e46fb793fae0a9
Diffstat (limited to 'tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp')
-rw-r--r-- | tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp | 29 |
1 file changed, 0 insertions, 29 deletions
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp index 00ed55caaf..6c74aaa6ed 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp @@ -34,15 +34,6 @@ auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream) } template<> -auto ParseDataArray<armnn::DataType::Float16>(std::istream& stream) -{ - return ParseArrayImpl<armnn::Half>(stream, [](const std::string& s) - { - return armnn::Half(std::stof(s)); - }); -} - -template<> auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream) { return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); }); @@ -148,20 +139,6 @@ void TensorPrinter::operator()(const std::vector<float>& values) WriteToFile(values); } -void TensorPrinter::operator()(const std::vector<armnn::Half>& values) -{ - if (m_PrintToConsole) - { - std::cout << m_OutputBinding << ": "; - ForEachValue(values, [](armnn::Half value) - { - printf("%f ", static_cast<float>(value)); - }); - printf("\n"); - } - WriteToFile(values); -} - void TensorPrinter::operator()(const std::vector<uint8_t>& values) { if(m_DequantizeOutput) @@ -284,12 +261,6 @@ void PopulateTensorWithData(armnnUtils::TContainer& tensorData, GenerateDummyTensorData<armnn::DataType::Float32>(numElements); } } - else if (dataTypeStr.compare("float16") == 0) - { - tensorData = readFromFile ? - ParseDataArray<armnn::DataType::Float16>(inputTensorFile) : - GenerateDummyTensorData<armnn::DataType::Float16>(numElements); - } else if (dataTypeStr.compare("int") == 0) { tensorData = readFromFile ? |