aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-12-18 16:13:06 +0000
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2021-01-13 11:53:53 +0000
commit4cf0fe385b934de95c022cae4a2c400d0d52377d (patch)
treeee6f9ac5f4c4b1d9d981ec9aff8b45e818259687
parentfc78446118f65d78271136b33340c9fbb8c009f1 (diff)
downloadarmnn-4cf0fe385b934de95c022cae4a2c400d0d52377d.tar.gz
IVGCVSW-5625 Add support for Float16 to Delegate
* Float16 unit tests for Reshape
* Remove unsupported data type from Pad

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ib1804bb6e708a0552fb40d05fe8a6511936f9793
-rw-r--r--delegate/CMakeLists.txt11
-rw-r--r--delegate/src/Pad.hpp3
-rw-r--r--delegate/src/test/PadTestHelper.hpp2
-rw-r--r--delegate/src/test/RedefineTestHelper.hpp25
-rw-r--r--delegate/src/test/ReshapeTest.cpp68
-rw-r--r--delegate/src/test/TestUtils.cpp67
-rw-r--r--delegate/src/test/TestUtils.hpp13
7 files changed, 159 insertions, 30 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index c052be2e51..9f64353d9a 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -93,6 +93,15 @@ target_compile_options(flatbuffer_headers INTERFACE -Wno-sign-conversion)
target_link_libraries(armnnDelegate PUBLIC flatbuffer_headers)
+# Add libraries from armnn third-party libraries
+# Third-party header files are not warning clean
+# We can't change compilation flags on header files directly, so we need to add them to an interface library first
+add_library(thirdparty_headers INTERFACE)
+target_include_directories(thirdparty_headers INTERFACE $<BUILD_INTERFACE:${ARMNN_SOURCE_DIR}/third-party>
+ $<INSTALL_INTERFACE:include/thirdparty_headers>)
+
+target_compile_options(thirdparty_headers INTERFACE -Wno-old-style-cast)
+
option(BUILD_UNIT_TESTS "Build unit tests" ON)
if(BUILD_UNIT_TESTS)
set(armnnDelegate_unittest_sources)
@@ -140,7 +149,7 @@ if(BUILD_UNIT_TESTS)
target_include_directories(DelegateUnitTests PRIVATE third-party)
# Add half library from armnn third-party libraries
- target_include_directories(DelegateUnitTests PRIVATE ${ARMNN_SOURCE_DIR}/third-party)
+ target_link_libraries(DelegateUnitTests PRIVATE thirdparty_headers)
target_link_libraries(DelegateUnitTests PRIVATE armnnDelegate)
target_link_libraries(DelegateUnitTests PRIVATE Armnn::armnnUtils)
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 6149819950..431b8d33f2 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -98,9 +98,6 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
case kTfLiteInt8:
descriptor.m_PadValue = tflite::GetTensorData<int8>(&tfLitepaddingValue)[0];
break;
- case kTfLiteInt16:
- descriptor.m_PadValue = tflite::GetTensorData<int16>(&tfLitepaddingValue)[0];
- break;
default:
TF_LITE_MAYBE_KERNEL_LOG(
tfLiteContext,
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
index 7221dedb20..025d13df67 100644
--- a/delegate/src/test/PadTestHelper.hpp
+++ b/delegate/src/test/PadTestHelper.hpp
@@ -73,7 +73,7 @@ std::vector<char> CreatePadTfLiteModel(
buffers.push_back(
CreateBuffer(flatBufferBuilder,
flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
- sizeof(int32_t) * paddingDim.size())));
+ sizeof(int32_t) * paddingDim.size())));
buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
std::vector<int32_t> operatorInputs;
diff --git a/delegate/src/test/RedefineTestHelper.hpp b/delegate/src/test/RedefineTestHelper.hpp
index ca8246c3a6..6f061572b4 100644
--- a/delegate/src/test/RedefineTestHelper.hpp
+++ b/delegate/src/test/RedefineTestHelper.hpp
@@ -136,7 +136,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
tflite::TensorType tensorType,
const std::vector<armnn::BackendId>& backends,
const std::vector<int32_t>& inputShape,
- const std::vector<int32_t>& outputShape,
+ std::vector<int32_t>& outputShape,
std::vector<T>& inputValues,
std::vector<T>& expectedOutputValues,
std::vector<int32_t>& targetShape,
@@ -186,28 +186,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
- auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
-
- CHECK(outputShape.size() == tfLiteDelegateOutputTensor->dims->size);
- CHECK(outputShape.size() == armnnDelegateOutputTensor->dims->size);
-
- for (size_t i = 0; i < static_cast<size_t>(tfLiteDelegateOutputTensor->dims->size); i++)
- {
- CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
- }
-
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
- }
+ armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
}
} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ReshapeTest.cpp b/delegate/src/test/ReshapeTest.cpp
index 715fed6279..11449e29b8 100644
--- a/delegate/src/test/ReshapeTest.cpp
+++ b/delegate/src/test/ReshapeTest.cpp
@@ -12,6 +12,10 @@
#include <doctest/doctest.h>
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
namespace armnnDelegate
{
@@ -41,6 +45,34 @@ void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption =
useOption);
}
+using namespace half_float::literal;
+
+void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 3, 2, 2 };
+ std::vector<int32_t> targetShape { 1, 3, 2, 2 };
+
+ std::vector<Half> inputValues = { 5._h, -8._h, -10._h, 7._h,
+ 8._h, 12._h, -15._h, 2._h,
+ 3._h, -4._h, -1._h, -11._h };
+
+ std::vector<Half> expectedOutputValues = { 5._h, -8._h, -10._h, 7._h,
+ 8._h, 12._h, -15._h, 2._h,
+ 3._h, -4._h, -1._h, -11._h };
+
+ RedefineTest<Half>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_FLOAT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption);
+}
+
void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
{
// Set input data
@@ -242,6 +274,12 @@ TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
ReshapeUint8Test(backends);
}
+TEST_CASE ("Reshape_Float16_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeSimpleFloat16Test(backends);
+}
+
TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
@@ -278,6 +316,12 @@ TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
ReshapeUint8Test(backends, false);
}
+TEST_CASE ("Reshape_Float16_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeSimpleFloat16Test(backends, false);
+}
+
} // TEST_SUITE("Reshape_GpuAccTests")
TEST_SUITE("Reshape_CpuAccTests")
@@ -319,6 +363,12 @@ TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
ReshapeUint8Test(backends);
}
+TEST_CASE ("Reshape_Float16_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeSimpleFloat16Test(backends);
+}
+
TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
@@ -355,6 +405,12 @@ TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
ReshapeUint8Test(backends, false);
}
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeSimpleFloat16Test(backends, false);
+}
+
} // TEST_SUITE("Reshape_CpuAccTests")
TEST_SUITE("Reshape_CpuRefTests")
@@ -402,6 +458,12 @@ TEST_CASE ("Reshape_Int16_CpuRef_Test")
ReshapeInt16Test(backends);
}
+TEST_CASE ("Reshape_Float16_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeSimpleFloat16Test(backends);
+}
+
TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
@@ -444,6 +506,12 @@ TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
ReshapeInt16Test(backends, false);
}
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeSimpleFloat16Test(backends, false);
+}
+
} // TEST_SUITE("Reshape_CpuRefTests")
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
index 31c05a678d..2787147639 100644
--- a/delegate/src/test/TestUtils.cpp
+++ b/delegate/src/test/TestUtils.cpp
@@ -8,8 +8,6 @@
namespace armnnDelegate
{
-
-
void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
{
auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
@@ -63,4 +61,69 @@ void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize)
}
}
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i] == doctest::Approx( tensor2[i] ));
+ }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i].data == tensor2[i].data);
+ }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i].data == half_float::detail::float2half<std::round_indeterminate, float>(tensor2[i]));
+ }
+}
+
+template <>
+void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
+ std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
+ std::vector<int32_t>& expectedOutputShape,
+ std::vector<Half>& expectedOutputValues,
+ unsigned int outputIndex)
+{
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+ auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
+ auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
+
+ CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+ CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+ for (size_t i = 0; i < expectedOutputShape.size(); i++)
+ {
+ CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+ CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+ CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+ }
+
+ armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+template <>
+void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
+{
+ auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+ auto tfLiteDelageInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ tfLiteDelageInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
+
+ }
+}
+
} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index b165920762..ad7600d27e 100644
--- a/delegate/src/test/TestUtils.hpp
+++ b/delegate/src/test/TestUtils.hpp
@@ -5,10 +5,15 @@
#pragma once
+#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/interpreter.h>
#include <doctest/doctest.h>
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
namespace armnnDelegate
{
@@ -43,6 +48,14 @@ void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize);
/// Can be used to compare int16_t data coming from a tflite interpreter with a tolerance of 1
void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize);
+/// Can be used to compare Half (Float16) data with a tolerance of limit_of_float*100
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize);
+
+/// Can be used to compare TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize);
+
+/// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
/// Can be used to compare the output tensor shape and values
/// from armnnDelegateInterpreter and tfLiteInterpreter.