aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJames Conroy <james.conroy@arm.com>2022-12-16 12:35:16 +0000
committerMatthew Sloyan <matthew.sloyan@arm.com>2022-12-23 16:28:44 +0000
commit267c985a6322fbc1efa22ba44188ac867537f1b1 (patch)
tree91ca88e9bc7b99b660e0ded6cfba7488cc96a1bd
parentcb223b7f485a33242fb9b18bc404bb33c29107e4 (diff)
downloadarmnn-267c985a6322fbc1efa22ba44188ac867537f1b1.tar.gz
IVGCVSW-7159 Add GpuFsa backend skeleton
* Basic skeleton code added for GPU Dynamic Fusion backend. Signed-off-by: James Conroy <james.conroy@arm.com> Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com> Change-Id: Ia49ecf994e278d034e85238be37fefaca84c543d
-rw-r--r--cmake/GlobalConfig.cmake1
-rw-r--r--src/backends/gpuFsa/CMakeLists.txt39
-rw-r--r--src/backends/gpuFsa/GpuFsaBackend.cpp107
-rw-r--r--src/backends/gpuFsa/GpuFsaBackend.hpp56
-rw-r--r--src/backends/gpuFsa/GpuFsaBackendId.hpp12
-rw-r--r--src/backends/gpuFsa/GpuFsaLayerSupport.cpp34
-rw-r--r--src/backends/gpuFsa/GpuFsaLayerSupport.hpp23
-rw-r--r--src/backends/gpuFsa/GpuFsaMemoryManager.cpp101
-rw-r--r--src/backends/gpuFsa/GpuFsaMemoryManager.hpp59
-rw-r--r--src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp19
-rw-r--r--src/backends/gpuFsa/GpuFsaTensorHandle.cpp176
-rw-r--r--src/backends/gpuFsa/GpuFsaTensorHandle.hpp83
-rw-r--r--src/backends/gpuFsa/GpuFsaTensorHandleFactory.cpp87
-rw-r--r--src/backends/gpuFsa/GpuFsaTensorHandleFactory.hpp56
-rw-r--r--src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp115
-rw-r--r--src/backends/gpuFsa/GpuFsaWorkloadFactory.hpp67
-rw-r--r--src/backends/gpuFsa/backend.cmake14
-rw-r--r--src/backends/gpuFsa/backend.mk57
-rw-r--r--src/backends/gpuFsa/test/CMakeLists.txt18
-rw-r--r--src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp8
-rw-r--r--src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp13
-rw-r--r--src/backends/gpuFsa/test/GpuFsaLayerTests.cpp12
-rw-r--r--src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp11
-rw-r--r--src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp46
-rw-r--r--src/backends/gpuFsa/workloads/CMakeLists.txt16
-rw-r--r--src/backends/gpuFsa/workloads/GpuFsaBaseWorkload.hpp39
26 files changed, 1269 insertions, 0 deletions
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index dd20e15076..bc6cd32385 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -10,6 +10,7 @@ option(BUILD_TESTS "Build test applications" OFF)
option(BUILD_FOR_COVERAGE "Use no optimization and output .gcno and .gcda files" OFF)
option(ARMCOMPUTENEON "Build with ARM Compute NEON support" OFF)
option(ARMCOMPUTECL "Build with ARM Compute OpenCL support" OFF)
+option(ARMNNGPUFSA "Build with GPU Dynamic Fusion Backend" OFF)
option(ARMNNREF "Build with ArmNN reference support" ON)
option(ARMNNTOSAREF "Build with TOSA reference support" OFF)
option(PROFILING_BACKEND_STREAMLINE "Forward the armNN profiling events to DS-5/Streamline as annotations" OFF)
diff --git a/src/backends/gpuFsa/CMakeLists.txt b/src/backends/gpuFsa/CMakeLists.txt
new file mode 100644
index 0000000000..f5ddb34854
--- /dev/null
+++ b/src/backends/gpuFsa/CMakeLists.txt
@@ -0,0 +1,39 @@
#
# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#

if(ARMNNGPUFSA)
    # Full backend build: all skeleton sources plus workloads and tests.
    list(APPEND armnnGpuFsaBackend_sources
        GpuFsaBackend.cpp
        GpuFsaBackend.hpp
        GpuFsaBackendId.hpp
        GpuFsaLayerSupport.cpp
        GpuFsaLayerSupport.hpp
        GpuFsaMemoryManager.cpp
        GpuFsaMemoryManager.hpp
        GpuFsaRegistryInitializer.cpp
        GpuFsaTensorHandle.cpp
        GpuFsaTensorHandle.hpp
        GpuFsaTensorHandleFactory.cpp
        GpuFsaTensorHandleFactory.hpp
        GpuFsaWorkloadFactory.cpp
        GpuFsaWorkloadFactory.hpp
    )

    add_subdirectory(workloads)

    if(BUILD_UNIT_TESTS)
        add_subdirectory(test)
    endif()
else()
    # Stub build: only the backend id header so other code can reference it.
    list(APPEND armnnGpuFsaBackend_sources
        GpuFsaBackendId.hpp
    )
endif()

add_library(armnnGpuFsaBackend OBJECT ${armnnGpuFsaBackend_sources})
target_include_directories(armnnGpuFsaBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnGpuFsaBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
target_include_directories(armnnGpuFsaBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
new file mode 100644
index 0000000000..9c2f4a0df6
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -0,0 +1,107 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaBackend.hpp"
#include "GpuFsaBackendId.hpp"
#include "GpuFsaWorkloadFactory.hpp"
#include "GpuFsaLayerSupport.hpp"
#include "GpuFsaTensorHandleFactory.hpp"

#include <armnn/BackendRegistry.hpp>
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/DefaultAllocator.hpp>
#include <backendsCommon/SubgraphUtils.hpp>

#include <Optimizer.hpp>

namespace armnn
{

// Returns the backend's identifier ("GpuFsa"); constructed once and cached.
const BackendId& GpuFsaBackend::GetIdStatic()
{
    static const BackendId s_Id{GpuFsaBackendId()};
    return s_Id;
}

// Creates a workload factory backed by the supplied shared memory manager,
// which is downcast to the GpuFsa-specific memory manager type.
IBackendInternal::IWorkloadFactoryPtr GpuFsaBackend::CreateWorkloadFactory(
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
{
    return std::make_unique<GpuFsaWorkloadFactory>(PolymorphicPointerDowncast<GpuFsaMemoryManager>(memoryManager));
}

// Creates a workload factory, wiring a fresh memory manager and tensor handle
// factory into the supplied registry along the way.
IBackendInternal::IWorkloadFactoryPtr GpuFsaBackend::CreateWorkloadFactory(
    class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
{
    auto memoryManager = std::make_shared<GpuFsaMemoryManager>();

    tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);

    auto factory = std::make_unique<GpuFsaTensorHandleFactory>(memoryManager);
    // Register copy and import factory pair (the same factory id serves both roles)
    tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
    // Register the factory
    tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));

    return std::make_unique<GpuFsaWorkloadFactory>(PolymorphicPointerDowncast<GpuFsaMemoryManager>(memoryManager));
}

// No backend context is provided yet: returns an empty pointer.
IBackendInternal::IBackendContextPtr GpuFsaBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
{
    return IBackendContextPtr{};
}

// No profiling context is provided yet: returns an empty pointer.
IBackendInternal::IBackendProfilingContextPtr GpuFsaBackend::CreateBackendProfilingContext(
    const IRuntime::CreationOptions&, IBackendProfilingPtr&)
{
    return IBackendProfilingContextPtr{};
}

IBackendInternal::IMemoryManagerUniquePtr GpuFsaBackend::CreateMemoryManager() const
{
    return std::make_unique<GpuFsaMemoryManager>();
}

// Shared, lazily-created layer-support object (one instance per process).
IBackendInternal::ILayerSupportSharedPtr GpuFsaBackend::GetLayerSupport() const
{
    static ILayerSupportSharedPtr layerSupport{new GpuFsaLayerSupport};
    return layerSupport;
}

// Skeleton: no fusion is performed yet; the subgraph is returned untouched.
OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                      const ModelOptions& modelOptions) const
{
    OptimizationViews optimizationViews(modelOptions);
    optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));

    return optimizationViews;
}

// The GpuFsa tensor handle factory is the only (and therefore preferred) choice.
std::vector<ITensorHandleFactory::FactoryId> GpuFsaBackend::GetHandleFactoryPreferences() const
{
    return std::vector<ITensorHandleFactory::FactoryId> { GpuFsaTensorHandleFactory::GetIdStatic() };
}

// Registers the memory manager and tensor handle factory with the registry;
// same wiring as the registry-based CreateWorkloadFactory overload.
void GpuFsaBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
{
    auto memoryManager = std::make_shared<GpuFsaMemoryManager>();

    registry.RegisterMemoryManager(memoryManager);

    auto factory = std::make_unique<GpuFsaTensorHandleFactory>(memoryManager);

    // Register copy and import factory pair
    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
    // Register the factory
    registry.RegisterFactory(std::move(factory));
}

// Default allocator used when the application does not supply a custom one.
std::unique_ptr<ICustomAllocator> GpuFsaBackend::GetDefaultAllocator() const
{
    return std::make_unique<DefaultAllocator>();
}

} // namespace armnn
diff --git a/src/backends/gpuFsa/GpuFsaBackend.hpp b/src/backends/gpuFsa/GpuFsaBackend.hpp
new file mode 100644
index 0000000000..803c6a4c66
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaBackend.hpp
@@ -0,0 +1,56 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/IBackendInternal.hpp>

namespace armnn
{

/// Skeleton backend for the GPU Dynamic Fusion (GpuFsa) execution path.
/// Implements IBackendInternal; most functionality is stubbed out until the
/// backend is fully integrated.
class GpuFsaBackend : public IBackendInternal
{
public:
    GpuFsaBackend() = default;
    ~GpuFsaBackend() = default;

    /// Returns the statically-allocated BackendId for GpuFsa.
    static const BackendId& GetIdStatic();
    const BackendId& GetId() const override
    {
        return GetIdStatic();
    }

    /// Creates the backend-specific memory manager.
    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override;

    /// Creates a workload factory using an externally-supplied memory manager.
    IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(
        const IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr) const override;

    /// Creates a workload factory and registers memory/tensor-handle factories
    /// with the supplied registry.
    IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(
        class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const override;

    /// Not implemented yet: returns an empty context pointer.
    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;

    /// Not implemented yet: returns an empty profiling context pointer.
    IBackendInternal::IBackendProfilingContextPtr
    CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
                                  IBackendProfilingPtr& backendProfiling) override;

    /// Returns the (currently all-unsupported) layer-support object.
    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;

    /// Currently performs no fusion: the subgraph is returned untouched.
    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
                                           const ModelOptions& modelOptions) const override;

    std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;

    void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) override;

    std::unique_ptr<ICustomAllocator> GetDefaultAllocator() const override;

private:
    // Private members

protected:
    // Protected members
};

} // namespace armnn
diff --git a/src/backends/gpuFsa/GpuFsaBackendId.hpp b/src/backends/gpuFsa/GpuFsaBackendId.hpp
new file mode 100644
index 0000000000..6d1ff3f780
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaBackendId.hpp
@@ -0,0 +1,12 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

namespace armnn
{

/// Unique identifier string for the GPU Dynamic Fusion backend.
constexpr const char* GpuFsaBackendId()
{
    return "GpuFsa";
}

} // namespace armnn
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
new file mode 100644
index 0000000000..6ae63a5668
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaLayerSupport.hpp"
+
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <LayerSupportCommon.hpp>
+
+#include <vector>
+
+namespace armnn
+{
+
+bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(type);
+ IgnoreUnused(infos);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(lstmParamsInfo);
+ IgnoreUnused(quantizedLstmInputParamsInfo);
+ IgnoreUnused(reasonIfUnsupported);
+
+ return false;
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.hpp b/src/backends/gpuFsa/GpuFsaLayerSupport.hpp
new file mode 100644
index 0000000000..dffc84cdcb
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <backendsCommon/LayerSupportBase.hpp>
+#include <backendsCommon/LayerSupportRules.hpp>
+
+namespace armnn {
+
+class GpuFsaLayerSupport : public ILayerSupport {
+public:
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>&,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
+};
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaMemoryManager.cpp b/src/backends/gpuFsa/GpuFsaMemoryManager.cpp
new file mode 100644
index 0000000000..4eefb87d88
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaMemoryManager.cpp
@@ -0,0 +1,101 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "GpuFsaMemoryManager.hpp"
+
+#include <armnn/utility/Assert.hpp>
+
+#include <algorithm>
+
+namespace armnn
+{
+
+GpuFsaMemoryManager::GpuFsaMemoryManager()
+{}
+
+GpuFsaMemoryManager::~GpuFsaMemoryManager()
+{}
+
+GpuFsaMemoryManager::Pool* GpuFsaMemoryManager::Manage(unsigned int numBytes)
+{
+ if (!m_FreePools.empty())
+ {
+ Pool* res = m_FreePools.back();
+ m_FreePools.pop_back();
+ res->Reserve(numBytes);
+ return res;
+ }
+ else
+ {
+ m_Pools.push_front(Pool(numBytes));
+ return &m_Pools.front();
+ }
+}
+
+void GpuFsaMemoryManager::Allocate(GpuFsaMemoryManager::Pool* pool)
+{
+ ARMNN_ASSERT(pool);
+ m_FreePools.push_back(pool);
+}
+
+void* GpuFsaMemoryManager::GetPointer(GpuFsaMemoryManager::Pool* pool)
+{
+ return pool->GetPointer();
+}
+
+void GpuFsaMemoryManager::Acquire()
+{
+ for (Pool &pool: m_Pools)
+ {
+ pool.Acquire();
+ }
+}
+
+void GpuFsaMemoryManager::Release()
+{
+ for (Pool &pool: m_Pools)
+ {
+ pool.Release();
+ }
+}
+
+GpuFsaMemoryManager::Pool::Pool(unsigned int numBytes)
+ : m_Size(numBytes),
+ m_Pointer(nullptr)
+{}
+
+GpuFsaMemoryManager::Pool::~Pool()
+{
+ if (m_Pointer)
+ {
+ Release();
+ }
+}
+
+void* GpuFsaMemoryManager::Pool::GetPointer()
+{
+ ARMNN_ASSERT_MSG(m_Pointer, "GpuFsaMemoryManager::Pool::GetPointer() called when memory not acquired");
+ return m_Pointer;
+}
+
+void GpuFsaMemoryManager::Pool::Reserve(unsigned int numBytes)
+{
+ ARMNN_ASSERT_MSG(!m_Pointer, "GpuFsaMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ m_Size = std::max(m_Size, numBytes);
+}
+
+void GpuFsaMemoryManager::Pool::Acquire()
+{
+ ARMNN_ASSERT_MSG(!m_Pointer, "GpuFsaMemoryManager::Pool::Acquire() called when memory already acquired");
+ m_Pointer = ::operator new(size_t(m_Size));
+}
+
+void GpuFsaMemoryManager::Pool::Release()
+{
+ ARMNN_ASSERT_MSG(m_Pointer, "GpuFsaMemoryManager::Pool::Release() called when memory not acquired");
+ ::operator delete(m_Pointer);
+ m_Pointer = nullptr;
+}
+
+} \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaMemoryManager.hpp b/src/backends/gpuFsa/GpuFsaMemoryManager.hpp
new file mode 100644
index 0000000000..636b839a51
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaMemoryManager.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IMemoryManager.hpp>
+
+#include <forward_list>
+#include <vector>
+
+namespace armnn
+{
+
+// A dummy MemoryManager which will be deleted once the GpuFsa Backend is integrated with ClMemoryManager
+class GpuFsaMemoryManager : public IMemoryManager
+{
+public:
+ GpuFsaMemoryManager();
+ virtual ~GpuFsaMemoryManager();
+
+ class Pool;
+
+ Pool* Manage(unsigned int numBytes);
+
+ void Allocate(Pool *pool);
+
+ void* GetPointer(Pool *pool);
+
+ void Acquire() override;
+ void Release() override;
+
+ class Pool
+ {
+ public:
+ Pool(unsigned int numBytes);
+ ~Pool();
+
+ void Acquire();
+ void Release();
+
+ void* GetPointer();
+
+ void Reserve(unsigned int numBytes);
+
+ private:
+ unsigned int m_Size;
+ void* m_Pointer;
+ };
+
+private:
+ GpuFsaMemoryManager(const GpuFsaMemoryManager&) = delete; // Noncopyable
+ GpuFsaMemoryManager& operator=(const GpuFsaMemoryManager&) = delete; // Noncopyable
+
+ std::forward_list<Pool> m_Pools;
+ std::vector<Pool*> m_FreePools;
+};
+
+}
diff --git a/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp b/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp
new file mode 100644
index 0000000000..875b7d7112
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaRegistryInitializer.cpp
@@ -0,0 +1,19 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "GpuFsaBackend.hpp"
#include <armnn/BackendRegistry.hpp>
namespace
{
using namespace armnn;
// Registers the GpuFsa backend with the global BackendRegistry at static
// initialization time; the lambda is the factory used to instantiate it.
static BackendRegistry::StaticRegistryInitializer g_RegisterHelper
{
    BackendRegistryInstance(),
    GpuFsaBackend::GetIdStatic(),
    []()
    {
        return IBackendInternalUniquePtr(new GpuFsaBackend);
    }
};
} // Anonymous namespace
diff --git a/src/backends/gpuFsa/GpuFsaTensorHandle.cpp b/src/backends/gpuFsa/GpuFsaTensorHandle.cpp
new file mode 100644
index 0000000000..e806be49bb
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaTensorHandle.cpp
@@ -0,0 +1,176 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "GpuFsaTensorHandle.hpp"
+
+namespace armnn
+{
+GpuFsaTensorHandle::GpuFsaTensorHandle(const TensorInfo& tensorInfo,
+ std::shared_ptr<GpuFsaMemoryManager>& memoryManager)
+ : m_TensorInfo(tensorInfo)
+ , m_MemoryManager(memoryManager)
+ , m_Pool(nullptr)
+ , m_UnmanagedMemory(nullptr)
+ , m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Undefined))
+ , m_Imported(false)
+ , m_IsImportEnabled(false)
+{}
+
+GpuFsaTensorHandle::GpuFsaTensorHandle(const TensorInfo& tensorInfo,
+ MemorySourceFlags importFlags)
+ : m_TensorInfo(tensorInfo)
+ , m_Pool(nullptr)
+ , m_UnmanagedMemory(nullptr)
+ , m_ImportFlags(importFlags)
+ , m_Imported(false)
+ , m_IsImportEnabled(true)
+{}
+
+GpuFsaTensorHandle::~GpuFsaTensorHandle()
+{
+ if (!m_Pool)
+ {
+ // unmanaged
+ if (!m_Imported)
+ {
+ ::operator delete(m_UnmanagedMemory);
+ }
+ }
+}
+
+void GpuFsaTensorHandle::Manage()
+{
+ if (!m_IsImportEnabled)
+ {
+ ARMNN_ASSERT_MSG(!m_Pool, "GpuFsaTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "GpuFsaTensorHandle::Manage() called after Allocate()");
+
+ m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
+ }
+}
+
+void GpuFsaTensorHandle::Allocate()
+{
+ // If import is enabled, do not allocate the tensor
+ if (!m_IsImportEnabled)
+ {
+
+ if (!m_UnmanagedMemory)
+ {
+ if (!m_Pool)
+ {
+ // unmanaged
+ m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes());
+ }
+ else
+ {
+ m_MemoryManager->Allocate(m_Pool);
+ }
+ }
+ else
+ {
+ throw InvalidArgumentException("GpuFsaTensorHandle::Allocate Trying to allocate a GpuFsaTensorHandle"
+ "that already has allocated memory.");
+ }
+ }
+}
+
+const void* GpuFsaTensorHandle::Map(bool /*unused*/) const
+{
+ return GetPointer();
+}
+
+void* GpuFsaTensorHandle::GetPointer() const
+{
+ if (m_UnmanagedMemory)
+ {
+ return m_UnmanagedMemory;
+ }
+ else if (m_Pool)
+ {
+ return m_MemoryManager->GetPointer(m_Pool);
+ }
+ else
+ {
+ throw NullPointerException("GpuFsaTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+ }
+}
+
+void GpuFsaTensorHandle::CopyOutTo(void* dest) const
+{
+ const void *src = GetPointer();
+ ARMNN_ASSERT(src);
+ memcpy(dest, src, m_TensorInfo.GetNumBytes());
+}
+
+void GpuFsaTensorHandle::CopyInFrom(const void* src)
+{
+ void *dest = GetPointer();
+ ARMNN_ASSERT(dest);
+ memcpy(dest, src, m_TensorInfo.GetNumBytes());
+}
+
+bool GpuFsaTensorHandle::Import(void* memory, MemorySource source)
+{
+ if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
+ {
+ if (m_IsImportEnabled && source == MemorySource::Malloc)
+ {
+ // Check memory alignment
+ if(!CanBeImported(memory, source))
+ {
+ if (m_Imported)
+ {
+ m_Imported = false;
+ m_UnmanagedMemory = nullptr;
+ }
+ return false;
+ }
+
+ // m_UnmanagedMemory not yet allocated.
+ if (!m_Imported && !m_UnmanagedMemory)
+ {
+ m_UnmanagedMemory = memory;
+ m_Imported = true;
+ return true;
+ }
+
+ // m_UnmanagedMemory initially allocated with Allocate().
+ if (!m_Imported && m_UnmanagedMemory)
+ {
+ return false;
+ }
+
+ // m_UnmanagedMemory previously imported.
+ if (m_Imported)
+ {
+ m_UnmanagedMemory = memory;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool GpuFsaTensorHandle::CanBeImported(void* memory, MemorySource source)
+{
+ if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
+ {
+ if (m_IsImportEnabled && source == MemorySource::Malloc)
+ {
+ uintptr_t alignment = GetDataTypeSize(m_TensorInfo.GetDataType());
+ if (reinterpret_cast<uintptr_t>(memory) % alignment)
+ {
+ return false;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+
+
+} \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaTensorHandle.hpp b/src/backends/gpuFsa/GpuFsaTensorHandle.hpp
new file mode 100644
index 0000000000..b2da50a467
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaTensorHandle.hpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/TensorHandle.hpp>
+
+#include "GpuFsaMemoryManager.hpp"
+
+namespace armnn
+{
+
+// An implementation of ITensorHandle with simple "bump the pointer" memory-management behaviour
+// Will be refactored to look more like ClTensorHandle.hpp and use ClMemoryManager instead of GpuFsaMemoryManager
+class GpuFsaTensorHandle : public ITensorHandle
+{
+public:
+ GpuFsaTensorHandle(const TensorInfo& tensorInfo, std::shared_ptr<GpuFsaMemoryManager>& memoryManager);
+
+ GpuFsaTensorHandle(const TensorInfo& tensorInfo, MemorySourceFlags importFlags);
+
+ ~GpuFsaTensorHandle();
+
+ virtual void Manage() override;
+
+ virtual void Allocate() override;
+
+ virtual ITensorHandle* GetParent() const override
+ {
+ return nullptr;
+ }
+
+ virtual const void* Map(bool /* blocking = true */) const override;
+ using ITensorHandle::Map;
+
+ virtual void Unmap() const override
+ {}
+
+ TensorShape GetStrides() const override
+ {
+ return GetUnpaddedTensorStrides(m_TensorInfo);
+ }
+
+ TensorShape GetShape() const override
+ {
+ return m_TensorInfo.GetShape();
+ }
+
+ const TensorInfo& GetTensorInfo() const
+ {
+ return m_TensorInfo;
+ }
+
+ virtual MemorySourceFlags GetImportFlags() const override
+ {
+ return m_ImportFlags;
+ }
+
+ virtual bool Import(void* memory, MemorySource source) override;
+ virtual bool CanBeImported(void* memory, MemorySource source) override;
+
+private:
+ // Only used for testing
+ void CopyOutTo(void*) const override;
+ void CopyInFrom(const void*) override;
+
+ void* GetPointer() const;
+
+ GpuFsaTensorHandle(const GpuFsaTensorHandle& other) = delete; // noncopyable
+ GpuFsaTensorHandle& operator=(const GpuFsaTensorHandle& other) = delete; //noncopyable
+
+ TensorInfo m_TensorInfo;
+
+ std::shared_ptr<GpuFsaMemoryManager> m_MemoryManager;
+ GpuFsaMemoryManager::Pool* m_Pool;
+ mutable void* m_UnmanagedMemory;
+ MemorySourceFlags m_ImportFlags;
+ bool m_Imported;
+ bool m_IsImportEnabled;
+};
+
+} \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaTensorHandleFactory.cpp b/src/backends/gpuFsa/GpuFsaTensorHandleFactory.cpp
new file mode 100644
index 0000000000..cd9d8cd64d
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaTensorHandleFactory.cpp
@@ -0,0 +1,87 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaTensorHandle.hpp"
+#include "GpuFsaTensorHandleFactory.hpp"
+
+#include "armnn/Logging.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
+namespace armnn
+{
+
+using FactoryId = ITensorHandleFactory::FactoryId;
+
+const FactoryId& GpuFsaTensorHandleFactory::GetIdStatic()
+{
+ static const FactoryId s_Id(GpuFsaTensorHandleFactoryId());
+ return s_Id;
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
+ const TensorShape& subTensorShape,
+ const unsigned int* subTensorOrigin)
+ const
+{
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
+ return nullptr;
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
+{
+ return GpuFsaTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout) const
+{
+ return GpuFsaTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const
+{
+ std::unique_ptr<GpuFsaTensorHandle> handle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
+ if (!IsMemoryManaged)
+ {
+ ARMNN_LOG(warning) << "GpuFsaTensorHandleFactory only has support for memory managed.";
+ }
+ return handle;
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const
+{
+ IgnoreUnused(dataLayout);
+ std::unique_ptr<GpuFsaTensorHandle> handle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
+ if (!IsMemoryManaged)
+ {
+ ARMNN_LOG(warning) << "GpuFsaTensorHandleFactory only has support for memory managed.";
+ }
+ return handle;
+}
+
+const FactoryId& GpuFsaTensorHandleFactory::GetId() const
+{
+ return GetIdStatic();
+}
+
+bool GpuFsaTensorHandleFactory::SupportsSubTensors() const
+{
+ return false;
+}
+
+MemorySourceFlags GpuFsaTensorHandleFactory::GetExportFlags() const
+{
+ return MemorySourceFlags(MemorySource::Undefined);
+}
+
+MemorySourceFlags GpuFsaTensorHandleFactory::GetImportFlags() const
+{
+ return MemorySourceFlags(MemorySource::Undefined);
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaTensorHandleFactory.hpp b/src/backends/gpuFsa/GpuFsaTensorHandleFactory.hpp
new file mode 100644
index 0000000000..9f88de598b
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaTensorHandleFactory.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "GpuFsaMemoryManager.hpp"
+
+#include <armnn/backends/ITensorHandleFactory.hpp>
+
+namespace armnn
+{
+
+constexpr const char * GpuFsaTensorHandleFactoryId() { return "Arm/GpuFsa/TensorHandleFactory"; }
+
+class GpuFsaTensorHandleFactory : public ITensorHandleFactory
+{
+
+public:
+ GpuFsaTensorHandleFactory(std::shared_ptr<GpuFsaMemoryManager> mgr)
+ : m_MemoryManager(mgr)
+ {}
+
+ std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+ TensorShape const& subTensorShape,
+ unsigned int const* subTensorOrigin) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const override;
+
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const override;
+
+ static const FactoryId& GetIdStatic();
+
+ const FactoryId& GetId() const override;
+
+ bool SupportsSubTensors() const override;
+
+ MemorySourceFlags GetExportFlags() const override;
+
+ MemorySourceFlags GetImportFlags() const override;
+
+private:
+ mutable std::shared_ptr<GpuFsaMemoryManager> m_MemoryManager;
+
+};
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp b/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
new file mode 100644
index 0000000000..687c8c0ac8
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <Layer.hpp>
+#include <armnn/backends/MemCopyWorkload.hpp>
+#include <armnn/backends/TensorHandle.hpp>
+#include "GpuFsaWorkloadFactory.hpp"
+#include "GpuFsaBackendId.hpp"
+#include "GpuFsaTensorHandle.hpp"
+
+namespace armnn
+{
+
+namespace
+{
+static const BackendId s_Id{GpuFsaBackendId()};
+}
+template <typename QueueDescriptorType>
+std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
+ const WorkloadInfo& info) const
+{
+ IgnoreUnused(descriptor);
+ IgnoreUnused(info);
+ return nullptr;
+}
+
+template <DataType ArmnnType>
+bool IsDataType(const WorkloadInfo& info)
+{
+ auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
+ auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
+ if (it != std::end(info.m_InputTensorInfos))
+ {
+ return true;
+ }
+ it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
+ if (it != std::end(info.m_OutputTensorInfos))
+ {
+ return true;
+ }
+ return false;
+}
+
+GpuFsaWorkloadFactory::GpuFsaWorkloadFactory(const std::shared_ptr<GpuFsaMemoryManager>& memoryManager)
+ : m_MemoryManager(memoryManager)
+{
+}
+
+GpuFsaWorkloadFactory::GpuFsaWorkloadFactory()
+ : m_MemoryManager(new GpuFsaMemoryManager())
+{
+}
+
+const BackendId& GpuFsaWorkloadFactory::GetBackendId() const
+{
+ return s_Id;
+}
+
+bool GpuFsaWorkloadFactory::IsLayerSupported(const Layer& layer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported)
+{
+ return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
+}
+
+bool GpuFsaWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions)
+{
+ return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool isMemoryManaged) const
+{
+ if (isMemoryManaged)
+ {
+ return std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
+ }
+ else
+ {
+ return std::make_unique<GpuFsaTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
+ }
+}
+
+std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool isMemoryManaged) const
+{
+ IgnoreUnused(dataLayout);
+
+ if (isMemoryManaged)
+ {
+ return std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
+ }
+ else
+ {
+ return std::make_unique<GpuFsaTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
+ }
+}
+
+std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ IgnoreUnused(type);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(info);
+
+ return nullptr;
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaWorkloadFactory.hpp b/src/backends/gpuFsa/GpuFsaWorkloadFactory.hpp
new file mode 100644
index 0000000000..0d80f0363c
--- /dev/null
+++ b/src/backends/gpuFsa/GpuFsaWorkloadFactory.hpp
@@ -0,0 +1,67 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "GpuFsaMemoryManager.hpp"
+
+#include <armnn/Optional.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+namespace armnn
+{
+
+// Dynamic Fusion workload factory.
+class GpuFsaWorkloadFactory : public IWorkloadFactory
+{
+public:
+ explicit GpuFsaWorkloadFactory(const std::shared_ptr<GpuFsaMemoryManager>& memoryManager);
+ GpuFsaWorkloadFactory();
+
+ ~GpuFsaWorkloadFactory() {}
+
+ const BackendId& GetBackendId() const override;
+
+ static bool IsLayerSupported(const Layer& layer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported);
+
+ static bool IsLayerSupported(const IConnectableLayer& layer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions);
+
+ bool SupportsSubTensors() const override { return false; }
+
+ ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
+ std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+ TensorShape const& subTensorShape,
+ unsigned int const* subTensorOrigin) const override
+ {
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
+ return nullptr;
+ }
+
+ ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool isMemoryManaged = true) const override;
+
+ ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool isMemoryManaged = true) const override;
+
+ std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+private:
+ template <typename QueueDescriptorType>
+ std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
+
+ mutable std::shared_ptr<GpuFsaMemoryManager> m_MemoryManager;
+};
+
+} // namespace armnn
diff --git a/src/backends/gpuFsa/backend.cmake b/src/backends/gpuFsa/backend.cmake
new file mode 100644
index 0000000000..589af19c22
--- /dev/null
+++ b/src/backends/gpuFsa/backend.cmake
@@ -0,0 +1,14 @@
+#
+# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/gpuFsa)
+list(APPEND armnnLibraries armnnGpuFsaBackend)
+
+if(ARMNNGPUFSA)
+ list(APPEND armnnLibraries armnnGpuFsaBackendWorkloads)
+ list(APPEND armnnUnitTestLibraries armnnGpuFsaBackendUnitTests)
+else()
+ message(STATUS "GPU Dynamic Fusion backend is disabled")
+endif() \ No newline at end of file
diff --git a/src/backends/gpuFsa/backend.mk b/src/backends/gpuFsa/backend.mk
new file mode 100644
index 0000000000..840e10338c
--- /dev/null
+++ b/src/backends/gpuFsa/backend.mk
@@ -0,0 +1,57 @@
+#
+# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+# BACKEND_SOURCES contains the list of files to be included
+# in the Android build and it is picked up by the Android.mk
+# file in the root of ArmNN
+
+# The variable to enable/disable the GPU Dynamic Fusion backend
+# (ARMNN_GPU_FSA_ENABLED is declared in android-nn-driver/Android.mk)
+ifeq ($(ARMNN_GPU_FSA_ENABLED),1)
+
+# ARMNN_GPU_FSA_ENABLED == 1
+# Include the source files for the GPU Dynamic Fusion backend
+
+BACKEND_SOURCES := \
+ GpuFsaBackend.cpp \
+ GpuFsaLayerSupport.cpp \
+ GpuFsaMemoryManager.cpp \
+ GpuFsaRegistryInitializer.cpp \
+ GpuFsaTensorHandle.cpp \
+ GpuFsaTensorHandleFactory.cpp \
+ GpuFsaWorkloadFactory.cpp
+else
+
+# ARMNN_GPU_FSA_ENABLED == 0
+# No source file will be compiled for the GPU Dynamic Fusion backend
+
+BACKEND_SOURCES :=
+
+endif
+
+# BACKEND_TEST_SOURCES contains the list of files to be included
+# in the Android unit test build (armnn-tests) and it is picked
+# up by the Android.mk file in the root of ArmNN
+
+# The variable to enable/disable the GPU Dynamic Fusion backend
+# (ARMNN_GPU_FSA_ENABLED is declared in android-nn-driver/Android.mk)
+ifeq ($(ARMNN_GPU_FSA_ENABLED),1)
+
+# ARMNN_GPU_FSA_ENABLED == 1
+# Include the source files for the GPU Dynamic Fusion backend tests
+
+BACKEND_TEST_SOURCES := \
+ test/GpuFsaEndToEndTests.cpp \
+ test/GpuFsaLayerSupportTests.cpp \
+ test/GpuFsaLayerTests.cpp \
+ test/GpuFsaOptimizedNetworkTests.cpp
+else
+
+# ARMNN_GPU_FSA_ENABLED == 0
+# No source file will be compiled for the GPU Dynamic Fusion backend tests
+
+BACKEND_TEST_SOURCES :=
+
+endif
diff --git a/src/backends/gpuFsa/test/CMakeLists.txt b/src/backends/gpuFsa/test/CMakeLists.txt
new file mode 100644
index 0000000000..c600589768
--- /dev/null
+++ b/src/backends/gpuFsa/test/CMakeLists.txt
@@ -0,0 +1,18 @@
+#
+# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnGpuFsaBackendUnitTests_sources
+ GpuFsaEndToEndTests.cpp
+ GpuFsaLayerTests.cpp
+ GpuFsaLayerSupportTests.cpp
+ GpuFsaOptimizedNetworkTests.cpp
+)
+
+add_library(armnnGpuFsaBackendUnitTests OBJECT ${armnnGpuFsaBackendUnitTests_sources})
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party/doctest)
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
new file mode 100644
index 0000000000..ff2b185c53
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -0,0 +1,8 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "backendsCommon/test/EndToEndTestImpl.hpp"
+
+#include <doctest/doctest.h> \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
new file mode 100644
index 0000000000..09aab3f7f0
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -0,0 +1,13 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Optional.hpp>
+#include <armnn/Types.hpp>
+
+#include <doctest/doctest.h>
+
+#include <iostream>
+
+using namespace armnn; \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp
new file mode 100644
index 0000000000..9c67e1f9b9
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp
@@ -0,0 +1,12 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaWorkloadFactoryHelper.hpp"
+
+#include <backendsCommon/test/LayerTests.hpp>
+
+#include <gpuFsa/GpuFsaWorkloadFactory.hpp>
+
+#include <UnitTests.hpp> \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
new file mode 100644
index 0000000000..fa97b135df
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -0,0 +1,11 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/INetwork.hpp>
+
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
+
+#include <doctest/doctest.h> \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
new file mode 100644
index 0000000000..d68b0c494c
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+#include <gpuFsa/GpuFsaWorkloadFactory.hpp>
+#include "gpuFsa/GpuFsaTensorHandleFactory.hpp"
+
+namespace
+{
+
+template<>
+struct WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>
+{
+ static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
+ {
+ armnn::GpuFsaBackend backend;
+ return backend.CreateMemoryManager();
+ }
+
+ static armnn::GpuFsaWorkloadFactory GetFactory(
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+ IgnoreUnused(memoryManager);
+ return armnn::GpuFsaWorkloadFactory();
+ }
+
+ static armnn::GpuFsaTensorHandleFactory GetTensorHandleFactory(
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+
+ return armnn::GpuFsaTensorHandleFactory(
+ armnn::PolymorphicPointerDowncast<armnn::GpuFsaMemoryManager>(memoryManager));
+ }
+};
+
+using GpuFsaWorkloadFactoryHelper = WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>;
+
+} // anonymous namespace
diff --git a/src/backends/gpuFsa/workloads/CMakeLists.txt b/src/backends/gpuFsa/workloads/CMakeLists.txt
new file mode 100644
index 0000000000..78f3a98eb1
--- /dev/null
+++ b/src/backends/gpuFsa/workloads/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnGpuFsaBackendWorkloads_sources
+ GpuFsaBaseWorkload.hpp
+)
+
+add_library(armnnGpuFsaBackendWorkloads OBJECT ${armnnGpuFsaBackendWorkloads_sources})
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
+target_include_directories(armnnGpuFsaBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/profiling/client/include)
diff --git a/src/backends/gpuFsa/workloads/GpuFsaBaseWorkload.hpp b/src/backends/gpuFsa/workloads/GpuFsaBaseWorkload.hpp
new file mode 100644
index 0000000000..c1b0f7fe97
--- /dev/null
+++ b/src/backends/gpuFsa/workloads/GpuFsaBaseWorkload.hpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/Workload.hpp>
+
+namespace armnn
+{
+
+template <typename QueueDescriptor>
+class GpuFsaBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+ GpuFsaBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<QueueDescriptor>(descriptor, info)
+ {}
+
+ virtual bool SupportsTensorHandleReplacement() const override
+ {
+ return true;
+ }
+
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ }
+
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Outputs[slot] = tensorHandle;
+ }
+};
+
+} //namespace armnn \ No newline at end of file