Diffstat (limited to 'src/backends/gpuFsa/test')
-rw-r--r--  src/backends/gpuFsa/test/CMakeLists.txt                   |  19
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaDefaultAllocatorTests.cpp  | 193
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp          |   8
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp      |  64
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerTests.cpp             |  12
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp  | 137
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp  |  45
7 files changed, 478 insertions(+), 0 deletions(-)
diff --git a/src/backends/gpuFsa/test/CMakeLists.txt b/src/backends/gpuFsa/test/CMakeLists.txt
new file mode 100644
index 0000000000..66091e90df
--- /dev/null
+++ b/src/backends/gpuFsa/test/CMakeLists.txt
@@ -0,0 +1,19 @@
+#
+# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnGpuFsaBackendUnitTests_sources
+ GpuFsaDefaultAllocatorTests.cpp
+ GpuFsaEndToEndTests.cpp
+ GpuFsaLayerTests.cpp
+ GpuFsaLayerSupportTests.cpp
+ GpuFsaOptimizedNetworkTests.cpp
+)
+
+add_library(armnnGpuFsaBackendUnitTests OBJECT ${armnnGpuFsaBackendUnitTests_sources})
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
+target_include_directories(armnnGpuFsaBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party/doctest)
diff --git a/src/backends/gpuFsa/test/GpuFsaDefaultAllocatorTests.cpp b/src/backends/gpuFsa/test/GpuFsaDefaultAllocatorTests.cpp
new file mode 100644
index 0000000000..17d5952217
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaDefaultAllocatorTests.cpp
@@ -0,0 +1,193 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/backends/ICustomAllocator.hpp>
+#include <armnn/BackendRegistry.hpp>
+#include <armnn/Descriptors.hpp>
+#include <armnn/Exceptions.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/backends/TensorHandle.hpp>
+// Requires the OpenCL backend to be included (GpuFsa)
+#include <gpuFsa/GpuFsaBackend.hpp>
+#include <doctest/doctest.h>
+#include <backendsCommon/DefaultAllocator.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
+#include <gpuFsa/GpuFsaBackendDefaultAllocator.hpp>
+
+using namespace armnn;
+
+namespace
+{
+
+TEST_SUITE("DefaultAllocatorTests")
+{
+
+TEST_CASE("DefaultAllocatorTest")
+{
+ float number = 3;
+
+ TensorInfo inputTensorInfo(TensorShape({1, 1}), DataType::Float32);
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ auto customAllocator = std::make_shared<DefaultAllocator>();
+ options.m_CustomAllocatorMap = {{"GpuFsa", std::move(customAllocator)}};
+ IRuntimePtr run = IRuntime::Create(options);
+
+    // Create structures for the input data
+ unsigned int numElements = inputTensorInfo.GetNumElements();
+ size_t totalBytes = numElements * sizeof(float);
+
+ void* alignedInputPtr = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+
+ auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
+ std::fill_n(inputPtr, numElements, number);
+ CHECK(inputPtr[0] == 3);
+
+ auto& backendRegistry = armnn::BackendRegistryInstance();
+ backendRegistry.DeregisterAllocator(GpuFsaBackend::GetIdStatic());
+}
+
+TEST_CASE("DefaultAllocatorTestMulti")
+{
+ float number = 3;
+
+ TensorInfo inputTensorInfo(TensorShape({2, 1}), DataType::Float32);
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ auto customAllocator = std::make_shared<DefaultAllocator>();
+ options.m_CustomAllocatorMap = {{"GpuFsa", std::move(customAllocator)}};
+ IRuntimePtr run = IRuntime::Create(options);
+
+    // Create structures for the input data
+ unsigned int numElements = inputTensorInfo.GetNumElements();
+ size_t totalBytes = numElements * sizeof(float);
+
+ void* alignedInputPtr = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+ void* alignedInputPtr2 = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+
+ auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
+ std::fill_n(inputPtr, numElements, number);
+ CHECK(inputPtr[0] == 3);
+ CHECK(inputPtr[1] == 3);
+
+ auto* inputPtr2 = reinterpret_cast<float*>(alignedInputPtr2);
+ std::fill_n(inputPtr2, numElements, number);
+ CHECK(inputPtr2[0] == 3);
+ CHECK(inputPtr2[1] == 3);
+
+    // The two allocations must not overlap: the first buffer is unchanged
+ CHECK(inputPtr[0] == 3);
+ CHECK(inputPtr[1] == 3);
+
+ auto& backendRegistry = armnn::BackendRegistryInstance();
+ backendRegistry.DeregisterAllocator(GpuFsaBackend::GetIdStatic());
+}
+
+TEST_CASE("DefaultAllocatorTestMock")
+{
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ IRuntimePtr run = IRuntime::Create(options);
+
+ // Initialize Mock Backend
+ MockBackendInitialiser initialiser;
+ auto factoryFun = BackendRegistryInstance().GetFactory(MockBackend().GetIdStatic());
+ CHECK(factoryFun != nullptr);
+ auto backend = factoryFun();
+ auto defaultAllocator = backend->GetDefaultAllocator();
+
+ // GetMemorySourceType
+ CHECK(defaultAllocator->GetMemorySourceType() == MemorySource::Malloc);
+
+ size_t totalBytes = 1 * sizeof(float);
+ // Allocate
+ void* ptr = defaultAllocator->allocate(totalBytes, 0);
+
+ // GetMemoryRegionAtOffset
+ CHECK(defaultAllocator->GetMemoryRegionAtOffset(ptr, 0, 0));
+
+ // Free
+ defaultAllocator->free(ptr);
+
+ // Clean up
+ auto& backendRegistry = armnn::BackendRegistryInstance();
+ backendRegistry.Deregister(MockBackend().GetIdStatic());
+ backendRegistry.DeregisterAllocator(GpuFsaBackend::GetIdStatic());
+}
+
+}
+
+
+TEST_SUITE("GpuFsaDefaultAllocatorTests")
+{
+
+TEST_CASE("GpuFsaDefaultAllocatorTest")
+{
+ float number = 3;
+
+ TensorInfo inputTensorInfo(TensorShape({1, 1}), DataType::Float32);
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ auto customAllocator = std::make_shared<GpuFsaBackendDefaultAllocator>();
+ options.m_CustomAllocatorMap = {{"GpuFsa", std::move(customAllocator)}};
+ IRuntimePtr run = IRuntime::Create(options);
+
+    // Create structures for the input data
+ unsigned int numElements = inputTensorInfo.GetNumElements();
+ size_t totalBytes = numElements * sizeof(float);
+
+ void* alignedInputPtr = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+
+ auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
+ std::fill_n(inputPtr, numElements, number);
+ CHECK(inputPtr[0] == 3);
+
+ auto& backendRegistry = armnn::BackendRegistryInstance();
+ backendRegistry.DeregisterAllocator(GpuFsaBackend::GetIdStatic());
+}
+
+TEST_CASE("GpuFsaDefaultAllocatorTestMulti")
+{
+ float number = 3;
+
+ TensorInfo inputTensorInfo(TensorShape({2, 1}), DataType::Float32);
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ auto customAllocator = std::make_shared<GpuFsaBackendDefaultAllocator>();
+ options.m_CustomAllocatorMap = {{"GpuFsa", std::move(customAllocator)}};
+ IRuntimePtr run = IRuntime::Create(options);
+
+    // Create structures for the input data
+ unsigned int numElements = inputTensorInfo.GetNumElements();
+ size_t totalBytes = numElements * sizeof(float);
+
+ void* alignedInputPtr = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+ void* alignedInputPtr2 = options.m_CustomAllocatorMap["GpuFsa"]->allocate(totalBytes, 0);
+
+ auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
+ std::fill_n(inputPtr, numElements, number);
+ CHECK(inputPtr[0] == 3);
+ CHECK(inputPtr[1] == 3);
+
+ auto* inputPtr2 = reinterpret_cast<float*>(alignedInputPtr2);
+ std::fill_n(inputPtr2, numElements, number);
+ CHECK(inputPtr2[0] == 3);
+ CHECK(inputPtr2[1] == 3);
+
+    // The two allocations must not overlap: the first buffer is unchanged
+ CHECK(inputPtr[0] == 3);
+ CHECK(inputPtr[1] == 3);
+
+ auto& backendRegistry = armnn::BackendRegistryInstance();
+ backendRegistry.DeregisterAllocator(GpuFsaBackend::GetIdStatic());
+}
+
+}
+
+} // anonymous namespace
\ No newline at end of file
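
The tests above drive ICustomAllocator only through the stock DefaultAllocator and GpuFsaBackendDefaultAllocator. For orientation, a minimal custom allocator satisfying the same interface might look like the sketch below; SampleAllocator is not part of this patch, and the fallback alignment and the size round-up (required by std::aligned_alloc) are assumptions.

    #include <armnn/backends/ICustomAllocator.hpp>
    #include <cstddef>
    #include <cstdlib>

    // Illustrative only; not part of this patch.
    class SampleAllocator : public armnn::ICustomAllocator
    {
    public:
        void* allocate(size_t size, size_t alignment) override
        {
            // The tests above pass alignment == 0, so fall back to a usable default.
            if (alignment == 0) { alignment = alignof(std::max_align_t); }
            // std::aligned_alloc requires size to be a multiple of alignment.
            size = ((size + alignment - 1) / alignment) * alignment;
            return std::aligned_alloc(alignment, size);
        }

        void free(void* ptr) override { std::free(ptr); }

        armnn::MemorySource GetMemorySourceType() override
        {
            return armnn::MemorySource::Malloc;
        }
    };
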
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
new file mode 100644
index 0000000000..1d6b99a31f
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -0,0 +1,8 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "backendsCommon/test/EndToEndTestImpl.hpp"
+
+#include <doctest/doctest.h>
\ No newline at end of file
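
GpuFsaEndToEndTests.cpp currently pulls in the shared end-to-end harness and doctest but registers no cases. Once the backend can execute an operator end to end, a case would presumably take the usual shape, sketched below; the suite name and the choice of ConstantUsageFloat32Test from EndToEndTestImpl.hpp are assumptions, not part of this patch.

    // Hypothetical future case; names are placeholders.
    TEST_SUITE("GpuFsaEndToEnd")
    {
    TEST_CASE("GpuFsaConstantUsageEndToEnd")
    {
        std::vector<armnn::BackendId> backends = { "GpuFsa" };
        ConstantUsageFloat32Test(backends);
    }
    }
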
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
new file mode 100644
index 0000000000..f162df0b55
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -0,0 +1,64 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Optional.hpp>
+#include <armnn/Types.hpp>
+
+#include <gpuFsa/GpuFsaLayerSupport.hpp>
+
+#include <doctest/doctest.h>
+
+#include <iostream>
+
+using namespace armnn;
+
+TEST_SUITE("GpuFsaLayerSupport")
+{
+
+TEST_CASE("IsLayerSupportedGpuFsaConv2d")
+{
+ TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+ TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+ TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+ TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+ Convolution2dDescriptor desc;
+ desc.m_BiasEnabled = true;
+ desc.m_DataLayout = DataLayout::NHWC;
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
+ {inputInfo, outputInfo, weightsInfo, biasesInfo},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedGpuFsaConv2dUnsupported")
+{
+ TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+ TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+ TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+
+ // NCHW is unsupported.
+ Convolution2dDescriptor desc;
+ desc.m_DataLayout = DataLayout::NCHW;
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
+ {inputInfo, outputInfo, weightsInfo, TensorInfo()},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(!supported);
+ REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
+}
+
+}
\ No newline at end of file
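
Both cases above target Convolution2d, the only operator this support checker knows about so far. A probe for any other layer type should fall through to the unsupported path; the sketch below is an assumption about that behaviour (it also presumes armnn/Descriptors.hpp is included for ActivationDescriptor).

    // Hedged sketch; not part of this patch.
    TEST_CASE("IsLayerSupportedGpuFsaUnsupportedLayer")
    {
        TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
        TensorInfo outputInfo({ 1, 5, 5, 1 }, DataType::Float32);

        ActivationDescriptor desc; // any layer type other than Convolution2d
        GpuFsaLayerSupport supportChecker;
        std::string reason;
        auto supported = supportChecker.IsLayerSupported(LayerType::Activation,
                                                         {inputInfo, outputInfo},
                                                         desc,
                                                         EmptyOptional(),
                                                         EmptyOptional(),
                                                         reason);
        CHECK(!supported);
    }
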
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp
new file mode 100644
index 0000000000..e032922d17
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaLayerTests.cpp
@@ -0,0 +1,12 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaWorkloadFactoryHelper.hpp"
+
+#include <backendsCommon/test/LayerTests.hpp>
+
+#include <gpuFsa/GpuFsaWorkloadFactory.hpp>
+
+#include <UnitTests.hpp>
\ No newline at end of file
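
This translation unit compiles to zero test cases for now: it wires the factory helper into the shared LayerTests/UnitTests machinery but registers nothing. A first hypothetical case might simply smoke-test factory construction through the helper; the test names and the extra GpuFsaBackend.hpp include are assumptions.

    // Hypothetical smoke test; not part of this patch.
    #include <gpuFsa/GpuFsaBackend.hpp>

    TEST_SUITE("GpuFsaLayer")
    {
    TEST_CASE("CreateGpuFsaWorkloadFactory")
    {
        auto memoryManager = GpuFsaWorkloadFactoryHelper::GetMemoryManager();
        armnn::GpuFsaWorkloadFactory factory =
            GpuFsaWorkloadFactoryHelper::GetFactory(memoryManager);
        CHECK(factory.GetBackendId() == armnn::GpuFsaBackend::GetIdStatic());
    }
    }
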
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
new file mode 100644
index 0000000000..7e094cec1e
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/INetwork.hpp>
+
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
+
+#include <doctest/doctest.h>
+
+using namespace armnn;
+
+TEST_SUITE("GpuFsaOptimizedNetwork")
+{
+
+TEST_CASE("SingleConv2dSupportedOptimizedNetwork")
+{
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ TensorInfo inputInfo({ 1, 5, 5, 1 }, DataType::Float32);
+ TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+ TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+ TensorInfo biasesInfo({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+ Convolution2dDescriptor desc;
+ desc.m_BiasEnabled = true;
+ desc.m_DataLayout = DataLayout::NHWC;
+
+ auto inputLayer = network->AddInputLayer(0, "input");
+ auto weightLayer = network->AddConstantLayer(ConstTensor(weightsInfo, nullptr), "weights");
+ auto biasLayer = network->AddConstantLayer(ConstTensor(biasesInfo, nullptr), "bias");
+ auto convLayer = network->AddConvolution2dLayer(desc, "conv2d");
+ auto outputLayer = network->AddOutputLayer(1, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+ weightLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
+ weightLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
+ biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+
+ convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+
+TEST_CASE("TwoConv2dSupportedOptimizedNetwork")
+{
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ TensorInfo inputInfo({ 1, 5, 5, 1 }, DataType::Float32);
+ TensorInfo intermediateInfo({ 1, 3, 3, 1 }, DataType::Float32);
+ TensorInfo outputInfo({ 1, 1, 1, 1 }, DataType::Float32);
+ TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+ TensorInfo biasesInfo({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+ Convolution2dDescriptor desc;
+ desc.m_BiasEnabled = true;
+ desc.m_DataLayout = DataLayout::NHWC;
+
+ auto inputLayer = network->AddInputLayer(0, "input");
+
+ auto weightLayer1 = network->AddConstantLayer(ConstTensor(weightsInfo, nullptr), "weights");
+ auto biasLayer1 = network->AddConstantLayer(ConstTensor(biasesInfo, nullptr), "bias");
+ auto convLayer1 = network->AddConvolution2dLayer(desc, "conv2d");
+
+ auto weightLayer2 = network->AddConstantLayer(ConstTensor(weightsInfo, nullptr), "weights");
+ auto biasLayer2 = network->AddConstantLayer(ConstTensor(biasesInfo, nullptr), "bias");
+ auto convLayer2 = network->AddConvolution2dLayer(desc, "conv2d");
+
+ auto outputLayer = network->AddOutputLayer(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+ weightLayer1->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(1));
+ weightLayer1->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
+ biasLayer1->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(2));
+ biasLayer1->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+
+ convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
+
+ weightLayer2->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(1));
+ weightLayer2->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
+ biasLayer2->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(2));
+ biasLayer2->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+
+ convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+
+}
\ No newline at end of file
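
Both cases stop at graph inspection. A natural continuation, sketched below, would hand the optimized network to the runtime; whether loading already succeeds at this stage of the backend bring-up is an assumption, so treat this as illustrative only.

    // Illustrative continuation inside either test case above; not part of this patch.
    NetworkId networkId = 0;
    Status status = runtime->LoadNetwork(networkId, std::move(optNet));
    CHECK(status == Status::Success);
    runtime->UnloadNetwork(networkId);
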
diff --git a/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
new file mode 100644
index 0000000000..c1d75d625b
--- /dev/null
+++ b/src/backends/gpuFsa/test/GpuFsaWorkloadFactoryHelper.hpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+#include <gpuFsa/GpuFsaWorkloadFactory.hpp>
+#include "gpuFsa/GpuFsaTensorHandleFactory.hpp"
+
+namespace
+{
+
+template<>
+struct WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>
+{
+ static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
+ {
+ armnn::GpuFsaBackend backend;
+ return backend.CreateMemoryManager();
+ }
+
+ static armnn::GpuFsaWorkloadFactory GetFactory(
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&)
+ {
+ return armnn::GpuFsaWorkloadFactory();
+ }
+
+ static armnn::GpuFsaTensorHandleFactory GetTensorHandleFactory(
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
+ {
+ return armnn::GpuFsaTensorHandleFactory(
+ armnn::PolymorphicPointerDowncast<armnn::GpuFsaMemoryManager>(memoryManager));
+ }
+};
+
+using GpuFsaWorkloadFactoryHelper = WorkloadFactoryHelper<armnn::GpuFsaWorkloadFactory>;
+
+} // anonymous namespace
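
This specialization is what lets the templated harness in UnitTests.hpp instantiate backend-specific factories generically. A minimal usage sketch of the three hooks follows; it is illustrative only, and CreateTensorHandle is assumed to come from the common ITensorHandleFactory interface.

    // Illustrative use of the specialization above.
    auto memoryManager       = GpuFsaWorkloadFactoryHelper::GetMemoryManager();
    auto workloadFactory     = GpuFsaWorkloadFactoryHelper::GetFactory(memoryManager);
    auto tensorHandleFactory = GpuFsaWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager);
    auto handle              = tensorHandleFactory.CreateTensorHandle(
        armnn::TensorInfo({ 1, 1 }, armnn::DataType::Float32));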