author     Colm Donelan <Colm.Donelan@arm.com>   2021-03-12 15:58:48 +0000
committer  Colm Donelan <colm.donelan@arm.com>   2021-03-23 13:59:15 +0000
commit     c74b1750fe8cf7affdbc59edd53357e0ea4efa53 (patch)
tree       fc925b928147f70016605a0c123066cfdf9c15a1
parent     4441d94fa0a97d4137e49315d69d32fdc0bbcd03 (diff)
download   armnn-c74b1750fe8cf7affdbc59edd53357e0ea4efa53.tar.gz
IVGCVSW-5724 Add import tensor handling as ClImportTensorHandleFactory.
* Add new ClImportTensorHandleFactory for tensor import.
* Add unit tests.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I61884fed65e764ebd6985fe0833e43a7296d0641
-rw-r--r--  src/backends/cl/CMakeLists.txt                               2
-rw-r--r--  src/backends/cl/ClImportTensorHandleFactory.cpp            122
-rw-r--r--  src/backends/cl/ClImportTensorHandleFactory.hpp             70
-rw-r--r--  src/backends/cl/ClTensorHandleFactory.cpp                   12
-rw-r--r--  src/backends/cl/ClTensorHandleFactory.hpp                   22
-rw-r--r--  src/backends/cl/test/CMakeLists.txt                          1
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp  125
7 files changed, 337 insertions, 17 deletions
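
For context before the individual diffs: the new factory implements the same ITensorHandleFactory interface as the existing ClTensorHandleFactory, but every handle it creates has importing enabled and is never backed by a memory manager. Below is a minimal usage sketch, not part of the patch; the handle type, the MemorySource flags and the ITensorHandle::Import call come from the existing Arm NN API, and whether a given import actually succeeds still depends on the ClTensorHandle implementation and the OpenCL driver.

    #include <cl/ClImportTensorHandleFactory.hpp>

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <memory>
    #include <vector>

    using namespace armnn;

    int main()
    {
        // Factory that advertises host (malloc) memory for both import and export.
        ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                            static_cast<MemorySourceFlags>(MemorySource::Malloc));

        TensorShape tensorShape{ 1, 16, 16, 3 };
        TensorInfo tensorInfo(tensorShape, DataType::Float32);

        // Handles created here have the import flag set and are never memory managed;
        // asking for a managed handle throws InvalidArgumentException (see the tests below).
        std::unique_ptr<ITensorHandle> handle = factory.CreateTensorHandle(tensorInfo);

        // The caller supplies the backing memory instead of letting Arm NN allocate it.
        std::vector<float> buffer(tensorInfo.GetNumElements());
        handle->Import(buffer.data(), MemorySource::Malloc);
        return 0;
    }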
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index 78771a027e..4c0fe1f549 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -32,6 +32,8 @@ if(ARMCOMPUTECL)
ClContextDeserializer.cpp
ClContextSerializer.hpp
ClContextSerializer.cpp
+ ClImportTensorHandleFactory.cpp
+ ClImportTensorHandleFactory.hpp
ClLayerSupport.cpp
ClLayerSupport.hpp
ClRegistryInitializer.cpp
diff --git a/src/backends/cl/ClImportTensorHandleFactory.cpp b/src/backends/cl/ClImportTensorHandleFactory.cpp
new file mode 100644
index 0000000000..1812034814
--- /dev/null
+++ b/src/backends/cl/ClImportTensorHandleFactory.cpp
@@ -0,0 +1,122 @@
+//
+// Copyright © 2021 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClImportTensorHandleFactory.hpp"
+#include "ClTensorHandle.hpp"
+
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <arm_compute/core/Coordinates.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+
+namespace armnn
+{
+
+using FactoryId = ITensorHandleFactory::FactoryId;
+
+std::unique_ptr<ITensorHandle> ClImportTensorHandleFactory::CreateSubTensorHandle(
+ ITensorHandle& parent, const TensorShape& subTensorShape, const unsigned int* subTensorOrigin) const
+{
+ arm_compute::Coordinates coords;
+ arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
+
+ coords.set_num_dimensions(subTensorShape.GetNumDimensions());
+ for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
+ {
+ // Arm compute indexes tensor coords in reverse order.
+ unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ }
+
+ const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+
+ // In order for ACL to support subtensors the concat axis cannot be on x or y and the values of x and y
+ // must match the parent shapes
+ if (coords.x() != 0 || coords.y() != 0)
+ {
+ return nullptr;
+ }
+ if ((parentShape.x() != shape.x()) || (parentShape.y() != shape.y()))
+ {
+ return nullptr;
+ }
+
+ if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+ {
+ return nullptr;
+ }
+
+ return std::make_unique<ClSubTensorHandle>(PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
+}
+
+std::unique_ptr<ITensorHandle> ClImportTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
+{
+ return ClImportTensorHandleFactory::CreateTensorHandle(tensorInfo, false);
+}
+
+std::unique_ptr<ITensorHandle> ClImportTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout) const
+{
+ return ClImportTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, false);
+}
+
+std::unique_ptr<ITensorHandle> ClImportTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const
+{
+ // If IsMemoryManaged is true then throw an exception.
+ if (IsMemoryManaged)
+ {
+ throw InvalidArgumentException("ClImportTensorHandleFactory does not support memory managed tensors.");
+ }
+ std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
+ tensorHandle->SetImportEnabledFlag(true);
+ tensorHandle->SetImportFlags(GetImportFlags());
+ return tensorHandle;
+}
+
+std::unique_ptr<ITensorHandle> ClImportTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const
+{
+ // If IsMemoryManaged is true then throw an exception.
+ if (IsMemoryManaged)
+ {
+ throw InvalidArgumentException("ClImportTensorHandleFactory does not support memory managed tensors.");
+ }
+ std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
+ // If we are not managing the memory then we must be importing.
+ tensorHandle->SetImportEnabledFlag(true);
+ tensorHandle->SetImportFlags(GetImportFlags());
+ return tensorHandle;
+}
+
+const FactoryId& ClImportTensorHandleFactory::GetIdStatic()
+{
+ static const FactoryId s_Id(ClImportTensorHandleFactoryId());
+ return s_Id;
+}
+
+const FactoryId& ClImportTensorHandleFactory::GetId() const
+{
+ return GetIdStatic();
+}
+
+bool ClImportTensorHandleFactory::SupportsSubTensors() const
+{
+ return true;
+}
+
+MemorySourceFlags ClImportTensorHandleFactory::GetExportFlags() const
+{
+ return m_ExportFlags;
+}
+
+MemorySourceFlags ClImportTensorHandleFactory::GetImportFlags() const
+{
+ return m_ImportFlags;
+}
+
+} // namespace armnn
\ No newline at end of file
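
The coordinate handling in CreateSubTensorHandle above mirrors ClTensorHandleFactory: Arm NN passes the subtensor origin with the outermost dimension first, while ACL Coordinates are indexed from the innermost dimension, so the origin is reversed before the x/y checks. A small standalone illustration of that reversal follows; the origin values are hypothetical and are chosen to line up with the CreateSubtensorNonZeroXYIsInvalid test later in this patch.

    #include <arm_compute/core/Coordinates.h>

    int main()
    {
        // Subtensor origin as Arm NN supplies it, outermost dimension first.
        const unsigned int subTensorOrigin[4] = { 0, 0, 1, 1 };
        const unsigned int numDimensions = 4;

        arm_compute::Coordinates coords;
        coords.set_num_dimensions(numDimensions);
        for (unsigned int i = 0; i < numDimensions; ++i)
        {
            // ACL indexes coordinates in reverse order, so subTensorOrigin[3] lands on coords.x().
            const unsigned int revertedIndex = numDimensions - i - 1;
            coords.set(i, static_cast<int>(subTensorOrigin[revertedIndex]));
        }

        // Here coords.x() == 1 and coords.y() == 1, so the factory would reject this
        // origin and CreateSubTensorHandle would return nullptr.
        return 0;
    }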
diff --git a/src/backends/cl/ClImportTensorHandleFactory.hpp b/src/backends/cl/ClImportTensorHandleFactory.hpp
new file mode 100644
index 0000000000..d6550dbeef
--- /dev/null
+++ b/src/backends/cl/ClImportTensorHandleFactory.hpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2021 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <aclCommon/BaseMemoryManager.hpp>
+#include <armnn/MemorySources.hpp>
+#include <armnn/backends/IMemoryManager.hpp>
+#include <armnn/backends/ITensorHandleFactory.hpp>
+
+namespace armnn
+{
+
+constexpr const char* ClImportTensorHandleFactoryId()
+{
+ return "Arm/Cl/ImportTensorHandleFactory";
+}
+
+/**
+ * This factory creates ClTensorHandles that refer to imported memory tensors.
+ */
+class ClImportTensorHandleFactory : public ITensorHandleFactory
+{
+public:
+ static const FactoryId m_Id;
+
+ /**
+ * Create a tensor handle factory for tensors that will be imported or exported.
+ *
+ * @param importFlags
+ * @param exportFlags
+ */
+ ClImportTensorHandleFactory(MemorySourceFlags importFlags, MemorySourceFlags exportFlags)
+ : m_ImportFlags(importFlags)
+ , m_ExportFlags(exportFlags)
+ {}
+
+ std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+ const TensorShape& subTensorShape,
+ const unsigned int* subTensorOrigin) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const override;
+
+ static const FactoryId& GetIdStatic();
+
+ const FactoryId& GetId() const override;
+
+ bool SupportsSubTensors() const override;
+
+ MemorySourceFlags GetExportFlags() const override;
+
+ MemorySourceFlags GetImportFlags() const override;
+
+private:
+ MemorySourceFlags m_ImportFlags;
+ MemorySourceFlags m_ExportFlags;
+};
+
+} // namespace armnn
\ No newline at end of file
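
Note that MemorySourceFlags is a plain bitmask, so the constructor above can be given more than one memory source; the unit tests in this patch only exercise MemorySource::Malloc. A hedged sketch of combining sources, assuming MemorySource::DmaBuf is available in this Arm NN version:

    #include <cl/ClImportTensorHandleFactory.hpp>

    #include <armnn/MemorySources.hpp>

    using namespace armnn;

    int main()
    {
        // Advertise both host memory and dma-buf handles for import, host memory only for export.
        // MemorySource::DmaBuf is an assumption here; drop it if the enum value is not present.
        MemorySourceFlags importFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc) |
                                        static_cast<MemorySourceFlags>(MemorySource::DmaBuf);
        MemorySourceFlags exportFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc);

        ClImportTensorHandleFactory factory(importFlags, exportFlags);

        // GetImportFlags()/GetExportFlags() return exactly what was passed in, and every
        // handle the factory creates carries the import flags.
        return 0;
    }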
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index 237f27a4ed..b8ee57f0bf 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -3,17 +3,15 @@
// SPDX-License-Identifier: MIT
//
-
#include "ClTensorHandleFactory.hpp"
#include "ClTensorHandle.hpp"
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/core/Coordinates.h>
#include <arm_compute/runtime/CL/CLSubTensor.h>
-
+#include <arm_compute/runtime/CL/CLTensor.h>
namespace armnn
{
@@ -35,8 +33,7 @@ std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateSubTensorHandle(ITen
coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
- const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(
- parent.GetShape());
+ const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
// In order for ACL to support subtensors the concat axis cannot be on x or y and the values of x and y
// must match the parent shapes
@@ -54,8 +51,7 @@ std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateSubTensorHandle(ITen
return nullptr;
}
- return std::make_unique<ClSubTensorHandle>(
- PolymorphicDowncast<IClTensorHandle *>(&parent), shape, coords);
+ return std::make_unique<ClSubTensorHandle>(PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
@@ -120,4 +116,4 @@ MemorySourceFlags ClTensorHandleFactory::GetImportFlags() const
return m_ImportFlags;
}
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClTensorHandleFactory.hpp b/src/backends/cl/ClTensorHandleFactory.hpp
index 13c97c0b3c..3acab0bce7 100644
--- a/src/backends/cl/ClTensorHandleFactory.hpp
+++ b/src/backends/cl/ClTensorHandleFactory.hpp
@@ -4,25 +4,29 @@
//
#pragma once
-#include <armnn/backends/ITensorHandleFactory.hpp>
#include <aclCommon/BaseMemoryManager.hpp>
-#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/MemorySources.hpp>
+#include <armnn/backends/IMemoryManager.hpp>
+#include <armnn/backends/ITensorHandleFactory.hpp>
namespace armnn
{
-constexpr const char* ClTensorHandleFactoryId() { return "Arm/Cl/TensorHandleFactory"; }
+constexpr const char* ClTensorHandleFactoryId()
+{
+ return "Arm/Cl/TensorHandleFactory";
+}
-class ClTensorHandleFactory : public ITensorHandleFactory {
+class ClTensorHandleFactory : public ITensorHandleFactory
+{
public:
static const FactoryId m_Id;
ClTensorHandleFactory(std::shared_ptr<ClMemoryManager> mgr)
- : m_MemoryManager(mgr),
- m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined)),
- m_ExportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined))
- {}
+ : m_MemoryManager(mgr)
+ , m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined))
+ , m_ExportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined))
+ {}
std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
const TensorShape& subTensorShape,
@@ -56,4 +60,4 @@ private:
MemorySourceFlags m_ExportFlags;
};
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index a2950d9d44..422c0a56e4 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -8,6 +8,7 @@ list(APPEND armnnClBackendUnitTests_sources
ClContextSerializerTests.cpp
ClCreateWorkloadTests.cpp
ClEndToEndTests.cpp
+ ClImportTensorHandleFactoryTests.cpp
ClJsonPrinterTests.cpp
ClLayerSupportTests.cpp
ClLayerTests.cpp
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
new file mode 100644
index 0000000000..0c6a9c6e7b
--- /dev/null
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -0,0 +1,125 @@
+//
+// Copyright © 2021 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <cl/ClImportTensorHandleFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(ClImportTensorHandleFactoryTests)
+using namespace armnn;
+
+BOOST_AUTO_TEST_CASE(ImportTensorFactoryAskedToCreateManagedTensorThrowsException)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ TensorInfo tensorInfo;
+ // This factory is designed to import the memory of tensors. Asking for a handle that requires
+ // a memory manager should result in an exception.
+ BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, true), InvalidArgumentException);
+ BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, DataLayout::NCHW, true), InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(ImportTensorFactoryCreateMallocTensorHandle)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ TensorShape tensorShape{ 6, 7, 8, 9 };
+ TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32);
+ // Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
+ // passed through correctly.
+ auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
+ BOOST_ASSERT(tensorHandle);
+ BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+
+ // Same method but explicitly specifying isManaged = false.
+ tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
+ BOOST_CHECK(tensorHandle);
+ BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+
+ // Now try TensorInfo and DataLayout factory method.
+ tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
+ BOOST_CHECK(tensorHandle);
+ BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtensorOfImportTensor)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ // Create a standard import tensor.
+ TensorShape tensorShape{ 224, 224, 1, 1 };
+ TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32);
+ auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
+ // Use the factory to create a 16x16 sub tensor.
+ TensorShape subTensorShape{ 16, 16, 1, 1 };
+ // Starting at an offset of 1x1.
+ uint32_t origin[4] = { 1, 1, 0, 0 };
+ auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
+ BOOST_CHECK(subTensor);
+ BOOST_ASSERT(subTensor->GetShape() == subTensorShape);
+ BOOST_ASSERT(subTensor->GetParent() == tensorHandle.get());
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtensorNonZeroXYIsInvalid)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ // Create a standard import tensor.
+ TensorShape tensorShape{ 224, 224, 1, 1 };
+ TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32);
+ auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
+ // Use the factory to create a 16x16 sub tensor.
+ TensorShape subTensorShape{ 16, 16, 1, 1 };
+ // This looks a bit backwards because of how Cl specifies tensors. Essentially we want to trigger our
+ // check "(coords.x() != 0 || coords.y() != 0)"
+ uint32_t origin[4] = { 0, 0, 1, 1 };
+ auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
+ // We expect a nullptr.
+ BOOST_ASSERT(subTensor == nullptr);
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtensorXYMustMatchParent)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ // Create a standard import tensor.
+ TensorShape tensorShape{ 224, 224, 1, 1 };
+ TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32);
+ auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
+ // Use the factory to create a 16x16 sub tensor but make the CL x and y axis different.
+ TensorShape subTensorShape{ 16, 16, 2, 2 };
+ // We want to trigger our ((parentShape.x() != shape.x()) || (parentShape.y() != shape.y()))
+ uint32_t origin[4] = { 1, 1, 0, 0 };
+ auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
+ // We expect a nullptr.
+ BOOST_ASSERT(subTensor == nullptr);
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtensorMustBeSmallerThanParent)
+{
+ // Create the factory to import tensors.
+ ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
+ static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ // Create a standard import tensor.
+ TensorShape tensorShape{ 224, 224, 1, 1 };
+ TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32);
+ auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
+ // Ask for a subtensor that's the same size as the parent.
+ TensorShape subTensorShape{ 224, 224, 1, 1 };
+ uint32_t origin[4] = { 1, 1, 0, 0 };
+ // This should result in a nullptr.
+ auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
+ BOOST_ASSERT(subTensor == nullptr);
+}
+
+BOOST_AUTO_TEST_SUITE_END()