author      narpra01 <narumol.prangnawarat@arm.com>    2019-01-16 09:53:09 +0000
committer   Matteo Martincigh <matteo.martincigh@arm.com>    2019-01-16 16:30:10 +0000
commit      b89b05f048a566a8c825f1d223966bc5a6abc3d5 (patch)
tree        1a7027e329fd687456a165a0d88be865626a407b
parent      6c8e8e7594604cca672486db224c1d041b39bfb9 (diff)
download    armnn-b89b05f048a566a8c825f1d223966bc5a6abc3d5.tar.gz
IVGCVSW-2508 Add no-op factory implementations and layer for Gather operator
* Added GatherQueueDescriptor to WorkloadData
* Added CreateGather function in WorkloadFactory.hpp
* Added stub implementation of the CreateGather function in workload factories
* Added GatherLayer stub implementation
* Added AddGatherLayer to Network
* Added IsGatherSupported to LayerSupportBase

Change-Id: I0408fd54e88a7d4e3d9e1c2811a9323f0da52a04
-rw-r--r--  Android.mk                                                       1
-rw-r--r--  CMakeLists.txt                                                   2
-rw-r--r--  include/armnn/ILayerSupport.hpp                                  5
-rw-r--r--  include/armnn/INetwork.hpp                                       5
-rw-r--r--  src/armnn/InternalTypes.cpp                                      1
-rw-r--r--  src/armnn/InternalTypes.hpp                                      1
-rw-r--r--  src/armnn/LayerSupport.cpp                                      10
-rw-r--r--  src/armnn/LayersFwd.hpp                                          2
-rw-r--r--  src/armnn/Network.cpp                                            5
-rw-r--r--  src/armnn/Network.hpp                                            2
-rw-r--r--  src/armnn/layers/GatherLayer.cpp                                37
-rw-r--r--  src/armnn/layers/GatherLayer.hpp                                41
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                 8
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                 5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                     6
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                     5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                 29
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                  3
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp    2
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp                            6
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp                            3
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                        6
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp                        3
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                    6
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp                    3
25 files changed, 188 insertions, 9 deletions
diff --git a/Android.mk b/Android.mk
index c61c7103d7..62a992d434 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,6 +98,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/FakeQuantizationLayer.cpp \
src/armnn/layers/FloorLayer.cpp \
src/armnn/layers/FullyConnectedLayer.cpp \
+ src/armnn/layers/GatherLayer.cpp \
src/armnn/layers/GreaterLayer.cpp \
src/armnn/layers/InputLayer.cpp \
src/armnn/layers/L2NormalizationLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 755f40858a..9651f076d2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -202,6 +202,8 @@ list(APPEND armnn_sources
src/armnn/layers/FloorLayer.cpp
src/armnn/layers/FullyConnectedLayer.hpp
src/armnn/layers/FullyConnectedLayer.cpp
+ src/armnn/layers/GatherLayer.cpp
+ src/armnn/layers/GatherLayer.hpp
src/armnn/layers/GreaterLayer.cpp
src/armnn/layers/GreaterLayer.hpp
src/armnn/layers/InputLayer.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 929896d285..8d800f4cc0 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -137,6 +137,11 @@ public:
const TensorInfo* cellToOutputWeights,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index f31176ad7c..05962b95b7 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -343,6 +343,11 @@ public:
/// @ return - Interface for configuring the layer.
virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
+ /// Add Gather layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @ return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddGatherLayer(const char* name = nullptr) = 0;
+
protected:
~INetwork() {}
};
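
For context, a minimal sketch of how a caller could use the new AddGatherLayer entry point once a backend provides a real workload. The shapes, layer names and data types below are illustrative assumptions, not part of this change:

    #include <armnn/ArmNN.hpp>

    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Two inputs: the values to gather from (input 0) and the indices (input 1).
    IConnectableLayer* params  = network->AddInputLayer(0, "params");
    IConnectableLayer* indices = network->AddInputLayer(1, "indices");
    IConnectableLayer* gather  = network->AddGatherLayer("gather");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    params->GetOutputSlot(0).Connect(gather->GetInputSlot(0));
    indices->GetOutputSlot(0).Connect(gather->GetInputSlot(1));
    gather->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Illustrative shapes: gathering 3 rows of a 5x4 tensor yields a 3x4 tensor.
    params->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 5, 4 }, DataType::Float32));
    indices->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Signed32));
    gather->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3, 4 }, DataType::Float32));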
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 16a19722df..15f4aa07e2 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -29,6 +29,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::FakeQuantization: return "FakeQuantization";
case LayerType::Floor: return "Floor";
case LayerType::FullyConnected: return "FullyConnected";
+ case LayerType::Gather: return "Gather";
case LayerType::Greater: return "Greater";
case LayerType::Input: return "Input";
case LayerType::L2Normalization: return "L2Normalization";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index f05ea25597..704efdf2b7 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -29,6 +29,7 @@ enum class LayerType
FakeQuantization,
Floor,
FullyConnected,
+ Gather,
Greater,
Input,
L2Normalization,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index b0b3eccb02..b600e4daa9 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -245,6 +245,16 @@ bool IsFullyConnectedSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
+bool IsGatherSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
+}
+
bool IsGreaterSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
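
The free function above forwards to the backend's ILayerSupport implementation, so with only the LayerSupportBase stub in place it is expected to report the layer as unsupported. A hedged sketch of a query, assuming the matching declaration is exposed in include/armnn/LayerSupport.hpp (that header is not touched by this diff) and using illustrative shapes and the CpuRef backend id:

    #include <armnn/BackendId.hpp>
    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    using namespace armnn;

    TensorInfo paramsInfo({ 5, 4 }, DataType::Float32);
    TensorInfo indicesInfo({ 3 }, DataType::Signed32);
    TensorInfo outputInfo({ 3, 4 }, DataType::Float32);

    char reason[1024];
    bool supported = IsGatherSupported(BackendId("CpuRef"), paramsInfo, indicesInfo, outputInfo,
                                       reason, sizeof(reason));
    if (!supported)
    {
        std::cout << "Gather not supported: " << reason << std::endl;
    }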
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 8b4ee0804b..27806c5752 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
#include "layers/FakeQuantizationLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/FullyConnectedLayer.hpp"
+#include "layers/GatherLayer.hpp"
#include "layers/GreaterLayer.hpp"
#include "layers/InputLayer.hpp"
#include "layers/L2NormalizationLayer.hpp"
@@ -88,6 +89,7 @@ DECLARE_LAYER(Equal)
DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Floor)
DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Gather)
DECLARE_LAYER(Greater)
DECLARE_LAYER(Input)
DECLARE_LAYER(L2Normalization)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7b9cb3db7f..8a1437a0fb 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -872,6 +872,11 @@ IConnectableLayer* Network::AddRsqrtLayer(const char * name)
return m_Graph->AddLayer<RsqrtLayer>(name);
}
+IConnectableLayer* Network::AddGatherLayer(const char* name)
+{
+ return m_Graph->AddLayer<GatherLayer>(name);
+}
+
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ba741e9af6..7690fafac0 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -66,6 +66,8 @@ public:
const ConstTensor& biases,
const char* name = nullptr) override;
+ IConnectableLayer* AddGatherLayer(const char* name = nullptr) override;
+
IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
new file mode 100644
index 0000000000..2e5d011599
--- /dev/null
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+GatherLayer::GatherLayer(const char* name)
+ : Layer(2, 1, LayerType::Gather, name)
+{
+}
+
+std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::Graph& graph,
+ const armnn::IWorkloadFactory& factory) const
+{
+ GatherQueueDescriptor descriptor;
+ return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+GatherLayer* GatherLayer::Clone(Graph& graph) const
+{
+ return CloneBase<GatherLayer>(graph, GetName());
+}
+
+void GatherLayer::ValidateTensorShapesFromInputs()
+{
+}
+
+} // namespace armnn
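
ValidateTensorShapesFromInputs is deliberately left empty in this no-op commit. For orientation only, a hypothetical helper (not part of this change) sketches the usual axis-0 Gather shape rule a later implementation would need: the output shape is the indices shape followed by the params shape with its first dimension dropped.

    #include <vector>

    // Hypothetical sketch, not in this commit: axis-0 Gather output shape.
    //   params [5, 4], indices [3]    -> output [3, 4]
    //   params [5, 4], indices [2, 3] -> output [2, 3, 4]
    std::vector<unsigned int> InferGatherOutputShape(const std::vector<unsigned int>& paramsShape,
                                                     const std::vector<unsigned int>& indicesShape)
    {
        std::vector<unsigned int> outputShape(indicesShape);
        outputShape.insert(outputShape.end(), paramsShape.begin() + 1, paramsShape.end());
        return outputShape;
    }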
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
new file mode 100644
index 0000000000..7b3aebe77e
--- /dev/null
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Layer.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a Gather operator.
+class GatherLayer : public Layer
+{
+public:
+ /// Makes a workload for the Gather type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ GatherLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref GatherLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ /// Constructor to create a GatherLayer.
+ /// @param [in] name Optional name for the layer.
+ GatherLayer(const char* name);
+
+ /// Default destructor
+ ~GatherLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 187d2f7d38..2e436578b7 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -156,6 +156,14 @@ bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
+ const armnn::TensorInfo& input1,
+ const armnn::TensorInfo& output,
+ armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index c6f943c7e0..77cb302d5e 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -92,6 +92,11 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
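
With only this base implementation, every backend reports Gather as unsupported. Purely as a rough sketch of what a future backend override could look like (the class name and the data-type checks are illustrative assumptions, not taken from this commit):

    // Hypothetical backend override, illustrative only.
    bool MyBackendLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
    {
        if (input1.GetDataType() != DataType::Signed32)
        {
            if (reasonIfUnsupported.has_value())
            {
                reasonIfUnsupported.value() = "Gather: indices tensor must be Signed32";
            }
            return false;
        }
        return input0.GetDataType() == output.GetDataType();
    }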
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 97981e2b8d..072b9a9934 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1051,6 +1051,12 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
"output");
}
+void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateTwoInputs(workloadInfo, "GatherQueueDescriptor");
+ ValidateSingleOutput(workloadInfo, "GatherQueueDescriptor");
+}
+
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
// This is internally generated so it should not need validation.
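
GatherQueueDescriptor::Validate only checks the input and output counts for now. A small sketch of exercising it, assuming the WorkloadInfo is filled in by hand via its public m_InputTensorInfos / m_OutputTensorInfos members (which is what the count checks inspect); shapes and types are illustrative:

    GatherQueueDescriptor descriptor;
    WorkloadInfo info;

    // Exactly two inputs (params, indices) and one output, as Validate requires.
    info.m_InputTensorInfos  = { TensorInfo({ 5, 4 }, DataType::Float32),
                                 TensorInfo({ 3 }, DataType::Signed32) };
    info.m_OutputTensorInfos = { TensorInfo({ 3, 4 }, DataType::Float32) };

    descriptor.Validate(info); // throws armnn::InvalidArgumentException on a count mismatch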
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 453896b912..2d68c9f7ee 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -378,6 +378,11 @@ struct RsqrtQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct GatherQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct PreCompiledQueueDescriptor : QueueDescriptorWithParameters<PreCompiledDescriptor>
{
PreCompiledQueueDescriptor()
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 0f015bd540..a70ec7e231 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -315,6 +315,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Gather:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
+ OverrideDataType(input1, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Input:
{
const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
@@ -477,16 +488,16 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
- case LayerType::MemCopy:
- {
- const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
- const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ case LayerType::MemCopy:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
- OverrideDataType(output, dataType),
- reason);
- break;
- }
+ result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Merger:
{
auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index d516698d3f..dd47dd6e05 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -162,6 +162,9 @@ public:
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
+
+ virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
};
} //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index edc58cf514..43c7581b8b 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -344,6 +344,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Floor)
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
+DECLARE_LAYER_POLICY_1_PARAM(Gather)
+
DECLARE_LAYER_POLICY_1_PARAM(Greater)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 28011cfd7b..71c1b89c09 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -362,4 +362,10 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompile
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+ const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 286e897472..ba2f0664dc 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -153,6 +153,9 @@ public:
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
private:
template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3728c86a66..311479a5f7 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -330,4 +330,10 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePreCompiled(const PreCompi
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+ const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 68317ed651..fe9f1b08a2 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -154,6 +154,9 @@ public:
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
private:
mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
};
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 361a3f1f74..cb7d6ea01a 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -324,4 +324,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompil
return nullptr;
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
+ const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 432ac72c6e..443af76265 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -171,6 +171,9 @@ public:
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
private:
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>