about · summary · refs · log · tree · commit · diff
path: root/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp')
-rw-r--r--  src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp  58
1 file changed, 17 insertions, 41 deletions
diff --git a/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp b/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
index 687c8c0ac8..6d13879f51 100644
--- a/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
+++ b/src/backends/gpuFsa/GpuFsaWorkloadFactory.cpp
@@ -1,10 +1,10 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include <Layer.hpp>
-#include <armnn/backends/MemCopyWorkload.hpp>
-#include <armnn/backends/TensorHandle.hpp>
+
#include "GpuFsaWorkloadFactory.hpp"
#include "GpuFsaBackendId.hpp"
#include "GpuFsaTensorHandle.hpp"
@@ -17,11 +17,9 @@ namespace
static const BackendId s_Id{GpuFsaBackendId()};
}
template <typename QueueDescriptorType>
-std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::MakeWorkload(const QueueDescriptorType& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
- IgnoreUnused(descriptor);
- IgnoreUnused(info);
return nullptr;
}
@@ -64,51 +62,29 @@ bool GpuFsaWorkloadFactory::IsLayerSupported(const Layer& layer,
return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}
-bool GpuFsaWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
- Optional<DataType> dataType,
- std::string& outReasonIfUnsupported,
- const ModelOptions& modelOptions)
-{
- return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
-}
-
std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
- const bool isMemoryManaged) const
+ const bool /*isMemoryManaged*/) const
{
- if (isMemoryManaged)
- {
- return std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
- }
- else
- {
- return std::make_unique<GpuFsaTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
- }
+ std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo);
+ tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
+
+ return tensorHandle;
}
std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout,
- const bool isMemoryManaged) const
+ const bool /*isMemoryManaged*/) const
{
- IgnoreUnused(dataLayout);
+ std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, dataLayout);
+ tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
- if (isMemoryManaged)
- {
- return std::make_unique<GpuFsaTensorHandle>(tensorInfo, m_MemoryManager);
- }
- else
- {
- return std::make_unique<GpuFsaTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
- }
+ return tensorHandle;
}
-std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::CreateWorkload(LayerType type,
- const QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::CreateWorkload(LayerType /*type*/,
+ const QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
- IgnoreUnused(type);
- IgnoreUnused(descriptor);
- IgnoreUnused(info);
-
return nullptr;
}