author     Sadik Armagan <sadik.armagan@arm.com>    2020-11-11 18:01:48 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>    2020-11-12 11:35:11 +0000
commit     4189cc5ca4bb12e02c5e7f86ec6079f76d845b59 (patch)
tree       bb5426804692e11abf000ffe9c64f7d95e104beb
parent     8081536d24291794b4e189e6d5532d913a4525cb (diff)
download   armnn-4189cc5ca4bb12e02c5e7f86ec6079f76d845b59.tar.gz
IVGCVSW-5504 'TfLiteDelegate: Introduce FP16 and BackendOptions'
* Added BackendOptions creation to armnn_delegate
* Included armnn/third-party in the armnn_delegate unit tests
* Updated the CreateConstTensor function

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I8e2099a465766b905bff701413307e5850b68e42
-rw-r--r--  delegate/CMakeLists.txt                   22
-rw-r--r--  delegate/include/DelegateOptions.hpp      26
-rw-r--r--  delegate/src/DelegateOptions.cpp          10
-rw-r--r--  delegate/src/DelegateUtils.hpp            94
-rw-r--r--  delegate/src/FullyConnected.hpp            6
-rw-r--r--  delegate/src/armnn_delegate.cpp            7
-rw-r--r--  delegate/src/test/ArmnnDelegateTest.cpp   13
7 files changed, 80 insertions(+), 98 deletions(-)
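
As a usage note (not part of the change itself): a minimal sketch of how an application could pass backend options through the new DelegateOptions constructor and attach the delegate to a TfLite interpreter. The helper name, the tuning file name and the interpreter setup are assumptions; the includes assume delegate/include is on the include path.

#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/interpreter.h>

#include <memory>
#include <vector>

using ArmnnDelegatePtr =
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>;

// Attach the ArmNN delegate to an existing interpreter. The returned pointer
// must stay alive for as long as the interpreter uses the delegate.
ArmnnDelegatePtr AttachArmnnDelegate(tflite::Interpreter& interpreter)
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc, armnn::Compute::CpuRef };

    // Backend specific options are forwarded unchanged to the ArmNN runtime.
    std::vector<armnn::BackendOptions> backendOptions;
    backendOptions.emplace_back(
        armnn::BackendOptions{ "GpuAcc",
                               {
                                   { "TuningLevel", 2 },
                                   { "TuningFile", "gpu-tuning.bin" } // hypothetical file name
                               }});

    armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);

    ArmnnDelegatePtr theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                      armnnDelegate::TfLiteArmnnDelegateDelete);
    interpreter.ModifyGraphWithDelegate(theArmnnDelegate.get());
    return theArmnnDelegate;
}
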
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 05ec851bf2..2ee00f3887 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -86,8 +86,10 @@ target_include_directories(armnnDelegate
PRIVATE
${Flatbuffers_INCLUDE_DIR})
-set(armnnDelegate_unittest_sources)
-list(APPEND armnnDelegate_unittest_sources
+option(BUILD_UNIT_TESTS "Build unit tests" ON)
+if(BUILD_UNIT_TESTS)
+ set(armnnDelegate_unittest_sources)
+ list(APPEND armnnDelegate_unittest_sources
src/test/ArmnnDelegateTest.cpp
src/test/ComparisonTest.cpp
src/test/ComparisonTestHelper.hpp
@@ -102,19 +104,23 @@ list(APPEND armnnDelegate_unittest_sources
src/test/QuantizationTest.cpp
src/test/QuantizationTestHelper.hpp)
-add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
-target_include_directories(DelegateUnitTests PRIVATE third-party)
+ add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
+ target_include_directories(DelegateUnitTests PRIVATE third-party)
-target_link_libraries(DelegateUnitTests armnnDelegate)
-target_link_libraries(DelegateUnitTests Armnn::armnnUtils)
+ # Add half library from armnn third-party libraries
+ target_include_directories(DelegateUnitTests PRIVATE ${ARMNN_SOURCE_DIR}/third-party)
-target_include_directories(DelegateUnitTests
+ target_link_libraries(DelegateUnitTests armnnDelegate)
+ target_link_libraries(DelegateUnitTests Armnn::armnnUtils)
+
+ target_include_directories(DelegateUnitTests
PRIVATE
${TfLite_INCLUDE_DIR})
-target_include_directories(DelegateUnitTests
+ target_include_directories(DelegateUnitTests
PRIVATE
${Flatbuffers_INCLUDE_DIR})
+endif()
####################################################
## Export targets
diff --git a/delegate/include/DelegateOptions.hpp b/delegate/include/DelegateOptions.hpp
index 0c8173d15f..daf20150be 100644
--- a/delegate/include/DelegateOptions.hpp
+++ b/delegate/include/DelegateOptions.hpp
@@ -17,19 +17,41 @@ namespace armnnDelegate
class DelegateOptions
{
public:
- DelegateOptions(armnn::Compute computeDevice);
+ DelegateOptions(armnn::Compute computeDevice, const std::vector<armnn::BackendOptions>& backendOptions = {});
- DelegateOptions(const std::vector<armnn::BackendId>& backends);
+ DelegateOptions(const std::vector<armnn::BackendId>& backends,
+ const std::vector<armnn::BackendOptions>& backendOptions = {});
const std::vector<armnn::BackendId>& GetBackends() const { return m_Backends; }
void SetBackends(const std::vector<armnn::BackendId>& backends) { m_Backends = backends; }
+ const std::vector<armnn::BackendOptions>& GetBackendOptions() const { return m_BackendOptions; }
+
private:
/// Which backend to run Delegate on.
/// Examples of possible values are: CpuRef, CpuAcc, GpuAcc.
/// CpuRef as default.
std::vector<armnn::BackendId> m_Backends = { armnn::Compute::CpuRef };
+
+ /// Pass backend specific options to Delegate
+ ///
+ /// For example, tuning can be enabled on GpuAcc like below
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ /// m_BackendOptions.emplace_back(
+ /// BackendOptions{"GpuAcc",
+ /// {
+ /// {"TuningLevel", 2},
+ /// {"TuningFile", filename}
+ /// }
+ /// });
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ /// The following backend options are available:
+ /// GpuAcc:
+ /// "TuningLevel" : int [0..3] (0=UseOnly(default) | 1=RapidTuning | 2=NormalTuning | 3=ExhaustiveTuning)
+ /// "TuningFile" : string [filenameString]
+ /// "KernelProfilingEnabled" : bool [true | false]
+ std::vector<armnn::BackendOptions> m_BackendOptions;
};
} // namespace armnnDelegate
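
As an aside, the new single-device overload takes the same options; a short illustrative sketch using one of the documented GpuAcc options:

// Illustrative only: options passed here are forwarded to the ArmNN runtime.
std::vector<armnn::BackendOptions> gpuOptions;
gpuOptions.emplace_back(
    armnn::BackendOptions{ "GpuAcc", { { "KernelProfilingEnabled", true } } });

armnnDelegate::DelegateOptions profiledOptions(armnn::Compute::GpuAcc, gpuOptions);
// The delegate reads the options back via profiledOptions.GetBackendOptions().
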
diff --git a/delegate/src/DelegateOptions.cpp b/delegate/src/DelegateOptions.cpp
index e931fa4be4..af78685fa6 100644
--- a/delegate/src/DelegateOptions.cpp
+++ b/delegate/src/DelegateOptions.cpp
@@ -8,13 +8,15 @@
namespace armnnDelegate
{
-DelegateOptions::DelegateOptions(armnn::Compute computeDevice)
- : m_Backends({computeDevice})
+DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
+ const std::vector<armnn::BackendOptions>& backendOptions)
+ : m_Backends({computeDevice}), m_BackendOptions(backendOptions)
{
}
-DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends)
- : m_Backends(backends)
+DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
+ const std::vector<armnn::BackendOptions>& backendOptions)
+ : m_Backends(backends), m_BackendOptions(backendOptions)
{
}
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index fb3f998283..71222276b4 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -397,91 +397,29 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
return ret;
}
-struct DataHolder
+armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
+ armnn::TensorInfo& tensorInfo,
+ armnn::Optional<armnn::PermutationVector&> permutationVector)
{
-public:
- DataHolder()
- : m_Fp32Data(nullptr), m_Uint8Data(nullptr),
- m_Int8Data(nullptr), m_Int16Data(nullptr), m_Int32Data(nullptr) {}
-
- DataHolder(std::unique_ptr<float[]>&& data)
- : m_Fp32Data(std::move(data)), m_Uint8Data(nullptr),
- m_Int8Data(nullptr), m_Int16Data(nullptr), m_Int32Data(nullptr) {}
-
- DataHolder(std::unique_ptr<uint8_t[]>&& data)
- : m_Fp32Data(nullptr), m_Uint8Data(std::move(data)),
- m_Int8Data(nullptr), m_Int16Data(nullptr), m_Int32Data(nullptr) {}
-
- DataHolder(std::unique_ptr<int8_t[]>&& data)
- : m_Fp32Data(nullptr), m_Uint8Data(nullptr),
- m_Int8Data(std::move(data)), m_Int16Data(nullptr), m_Int32Data(nullptr) {}
-
- DataHolder(std::unique_ptr<int16_t[]>&& data)
- : m_Fp32Data(nullptr), m_Uint8Data(nullptr),
- m_Int8Data(nullptr), m_Int16Data(std::move(data)), m_Int32Data(nullptr) {}
-
- DataHolder(std::unique_ptr<int32_t[]>&& data)
- : m_Fp32Data(nullptr), m_Uint8Data(nullptr),
- m_Int8Data(nullptr), m_Int16Data(nullptr), m_Int32Data(std::move(data)) {}
-
-private:
- std::unique_ptr<float[]> m_Fp32Data;
- std::unique_ptr<uint8_t[]> m_Uint8Data;
- std::unique_ptr<int8_t[]> m_Int8Data;
- std::unique_ptr<int16_t[]> m_Int16Data;
- std::unique_ptr<int32_t[]> m_Int32Data;
-};
-
-template <typename T>
-std::pair<armnn::ConstTensor, DataHolder> CreateConstTensorImpl(
- const TfLiteTensor* tensor,
- armnn::TensorInfo& tensorInfo,
- armnn::Optional<armnn::PermutationVector&> permutationVector)
-{
- std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
+ if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
+ {
+        throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: "
+                               + std::to_string(tfLiteTensor->allocation_type));
+ }
+
if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
{
- tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
- armnnUtils::Permute(tensorInfo.GetShape(),
+ std::vector<uint8_t> swizzledData;
+ swizzledData.resize(tensorInfo.GetNumBytes());
+ armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
permutationVector.value(),
- reinterpret_cast<const T*>(tensor->data.raw), data.get(), sizeof(T));
+ tfLiteTensor->data.data,
+ swizzledData.data(),
+ armnn::GetDataTypeSize(tensorInfo.GetDataType()));
+ return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), swizzledData.data());
}
else
{
- ::memcpy(data.get(), tensor->data.raw, tensorInfo.GetNumBytes());
- }
-
- auto constData = std::make_pair(armnn::ConstTensor(tensorInfo, data.get()), std::move(data));
-
- DataHolder storedData(std::move(constData.second));
- return std::make_pair(constData.first, std::move(storedData));
-}
-
-std::pair<armnn::ConstTensor, DataHolder> CreateConstTensor(
- const TfLiteTensor* tfLiteTensor,
- armnn::TensorInfo& tensorInfo,
- armnn::Optional<armnn::PermutationVector&> permutationVector)
-{
- switch (tensorInfo.GetDataType())
- {
- case armnn::DataType::Float32:
- return CreateConstTensorImpl<float>(tfLiteTensor, tensorInfo, permutationVector);
- case armnn::DataType::QAsymmU8:
- return CreateConstTensorImpl<uint8_t>(tfLiteTensor, tensorInfo, permutationVector);
- case armnn::DataType::QSymmS8:
- return CreateConstTensorImpl<int8_t>(tfLiteTensor, tensorInfo, permutationVector);
- case armnn::DataType::QAsymmS8:
- return CreateConstTensorImpl<int8_t>(tfLiteTensor, tensorInfo, permutationVector);
- case armnn::DataType::QSymmS16:
- return CreateConstTensorImpl<int16_t>(tfLiteTensor, tensorInfo, permutationVector);
- case armnn::DataType::Signed32:
- return CreateConstTensorImpl<int32_t>(tfLiteTensor, tensorInfo, permutationVector);
- default:
- {
- throw armnn::Exception(
- "TfLiteArmnnDelegate: Unsupported data type when creating const tensor: "
- + std::string(armnn::GetDataTypeName(tensorInfo.GetDataType())));
- }
+ return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
}
}
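
For clarity on the new calling convention (the previous version returned a std::pair of ConstTensor and DataHolder), a hedged sketch of how an operator visitor now obtains a weights tensor; tfLiteTensors and tfLiteNode stand in for the variables the existing operator files already use:

const TfLiteTensor& tfLiteWeightsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
armnn::TensorInfo weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);

// The helper now returns the ConstTensor directly; in the non-permuted case it
// references the TfLite-owned (kTfLiteMmapRo) buffer instead of a copy.
armnn::ConstTensor weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
                                                     weightsTensorInfo,
                                                     armnn::Optional<armnn::PermutationVector&>());
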
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index f35f4c92b0..48bf06f94a 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -165,13 +165,13 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
biasTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor,
- weightsTensor.first,
- armnn::Optional<armnn::ConstTensor>(biasTensor.first));
+ weightsTensor,
+ armnn::Optional<armnn::ConstTensor>(biasTensor));
}
else
{
layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor,
- weightsTensor.first,
+ weightsTensor,
armnn::EmptyOptional());
}
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 69bd4f7350..3380c84d0b 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -118,10 +118,15 @@ Delegate::Delegate(armnnDelegate::DelegateOptions options)
{
// Create ArmNN Runtime
armnn::IRuntime::CreationOptions runtimeOptions;
+
+ auto backendOptions = m_Options.GetBackendOptions();
+ if (!backendOptions.empty())
+ {
+ runtimeOptions.m_BackendOptions = backendOptions;
+ }
m_Runtime = armnn::IRuntime::Create(runtimeOptions);
std::vector<armnn::BackendId> backends;
-
if (m_Runtime)
{
const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
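
For reference, the forwarding above is equivalent to creating the ArmNN runtime directly with the same options; a minimal sketch:

// Minimal sketch of what the forwarding above amounts to.
armnn::IRuntime::CreationOptions runtimeOptions;
runtimeOptions.m_BackendOptions.emplace_back(
    armnn::BackendOptions{ "GpuAcc", { { "KernelProfilingEnabled", true } } });
armnn::IRuntimePtr runtime = armnn::IRuntime::Create(runtimeOptions);
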
diff --git a/delegate/src/test/ArmnnDelegateTest.cpp b/delegate/src/test/ArmnnDelegateTest.cpp
index 7cec70b022..050fe45ca0 100644
--- a/delegate/src/test/ArmnnDelegateTest.cpp
+++ b/delegate/src/test/ArmnnDelegateTest.cpp
@@ -35,9 +35,18 @@ TEST_CASE ("ArmnnDelegate Registered")
const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);
- // create the Armnn Delegate
+ // Create the Armnn Delegate
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::vector<armnn::BackendOptions> backendOptions;
+ backendOptions.emplace_back(
+ armnn::BackendOptions{ "BackendName",
+ {
+ { "Option1", 42 },
+ { "Option2", true }
+ }}
+ );
+
+ armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
armnnDelegate::TfLiteArmnnDelegateDelete);