From 430b5d824a17ddfb31bb29c18407e45e0def958c Mon Sep 17 00:00:00 2001
From: Conor Kennedy
Date: Wed, 14 Nov 2018 15:28:28 +0000
Subject: IVGCVSW-2086: Add StridedSlice layer & corresponding no-op factory
 implementations

Change-Id: I906dcb8b4c9b491bf3a661f208c09d0ea1c0fa35
---
 Android.mk                                      |  1 +
 CMakeLists.txt                                  |  2 +
 include/armnn/Descriptors.hpp                   | 33 ++++++++++++
 include/armnn/DescriptorsFwd.hpp                |  1 +
 include/armnn/ILayerSupport.hpp                 |  5 ++
 include/armnn/INetwork.hpp                      |  7 +++
 include/armnn/LayerSupport.hpp                  |  8 +++
 src/armnn/InternalTypes.cpp                     |  1 +
 src/armnn/InternalTypes.hpp                     |  1 +
 src/armnn/LayerSupport.cpp                      | 10 ++++
 src/armnn/LayersFwd.hpp                         |  2 +
 src/armnn/Network.cpp                           |  6 +++
 src/armnn/Network.hpp                           |  3 ++
 src/armnn/layers/StridedSliceLayer.cpp          | 58 ++++++++++++++++++++++
 src/armnn/layers/StridedSliceLayer.hpp          | 27 ++++++++++
 src/backends/backendsCommon/ILayerSupport.cpp   |  8 +++
 src/backends/backendsCommon/WorkloadData.cpp    | 37 ++++++++++++++
 src/backends/backendsCommon/WorkloadData.hpp    |  6 +++
 src/backends/backendsCommon/WorkloadFactory.cpp | 11 ++++
 src/backends/backendsCommon/WorkloadFactory.hpp |  3 ++
 .../test/IsLayerSupportedTestImpl.hpp           |  2 +
 src/backends/cl/ClWorkloadFactory.cpp           |  6 +++
 src/backends/cl/ClWorkloadFactory.hpp           |  3 ++
 src/backends/neon/NeonWorkloadFactory.cpp       |  6 +++
 src/backends/neon/NeonWorkloadFactory.hpp       |  3 ++
 src/backends/reference/RefWorkloadFactory.cpp   |  6 +++
 src/backends/reference/RefWorkloadFactory.hpp   |  3 ++
 27 files changed, 259 insertions(+)
 create mode 100644 src/armnn/layers/StridedSliceLayer.cpp
 create mode 100644 src/armnn/layers/StridedSliceLayer.hpp

diff --git a/Android.mk b/Android.mk
index 4742aed6d1..af585af8ca 100644
--- a/Android.mk
+++ b/Android.mk
@@ -108,6 +108,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/ResizeBilinearLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SplitterLayer.cpp \
+        src/armnn/layers/StridedSliceLayer.cpp \
         src/armnn/Descriptors.cpp \
         src/armnn/Exceptions.cpp \
         src/armnn/Graph.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0fa1907190..4f13496572 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -229,6 +229,8 @@ list(APPEND armnn_sources
     src/armnn/layers/SoftmaxLayer.cpp
     src/armnn/layers/SplitterLayer.hpp
     src/armnn/layers/SplitterLayer.cpp
+    src/armnn/layers/StridedSliceLayer.cpp
+    src/armnn/layers/StridedSliceLayer.hpp
     src/armnn/layers/SubtractionLayer.cpp
     src/armnn/layers/SubtractionLayer.hpp
     src/armnn/Descriptors.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 396e427ee4..f8dd18ab8a 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -421,4 +421,37 @@ struct PadDescriptor
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
 };
 
+struct StridedSliceDescriptor
+{
+    StridedSliceDescriptor()
+    : m_DataLayout(DataLayout::NCHW)
+    {}
+
+    StridedSliceDescriptor(const std::vector<int>& begin,
+                           const std::vector<int>& end,
+                           const std::vector<int>& stride)
+    : m_Begin(begin)
+    , m_End(end)
+    , m_Stride(stride)
+    , m_BeginMask(0)
+    , m_EndMask(0)
+    , m_ShrinkAxisMask(0)
+    , m_EllipsisMask(0)
+    , m_NewAxisMask(0)
+    , m_DataLayout(DataLayout::NCHW)
+    {}
+
+    std::vector<int> m_Begin;
+    std::vector<int> m_End;
+    std::vector<int> m_Stride;
+
+    int32_t m_BeginMask;
+    int32_t m_EndMask;
+    int32_t m_ShrinkAxisMask;
+    int32_t m_EllipsisMask;
+    int32_t m_NewAxisMask;
+
+    DataLayoutIndexed m_DataLayout;
+};
+
 }
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 0e96c360d3..a2acc0717e 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -26,6 +26,7 @@ struct ResizeBilinearDescriptor;
 struct SoftmaxDescriptor;
 struct OriginsDescriptor;
 struct ViewsDescriptor;
+struct StridedSliceDescriptor;
 
 using MergerDescriptor = OriginsDescriptor;
 using SplitterDescriptor = ViewsDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 1bf268f409..e679f84109 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -183,6 +183,11 @@ public:
                                      const ViewsDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
+    virtual bool IsStridedSliceSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const StridedSliceDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
     virtual bool IsSubtractionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index df274d6dc1..ac7faab279 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -311,6 +311,13 @@ public:
     virtual IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
                                            const char* name = nullptr) = 0;
 
+    /// Adds a strided slice layer to the network.
+    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+                                                    const char* name = nullptr) = 0;
+
 protected:
     ~INetwork() {}
 };
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index bd20f185da..7bed5779d3 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -254,4 +254,12 @@ bool IsPadSupported(const BackendId& backend,
                     char* reasonIfUnsupported = nullptr,
                     size_t reasonIfUnsupportedMaxLength = 1024);
 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsStridedSliceSupported(const BackendId& backend,
+                             const TensorInfo& input,
+                             const TensorInfo& output,
+                             const StridedSliceDescriptor& descriptor,
+                             char* reasonIfUnsupported = nullptr,
+                             size_t reasonIfUnsupportedMaxLength = 1024);
+
 }
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 3493a3d5a2..dade1f70c3 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -43,6 +43,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Softmax: return "Softmax";
         case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
         case LayerType::Splitter: return "Splitter";
+        case LayerType::StridedSlice: return "StridedSlice";
         case LayerType::Subtraction: return "Subtraction";
         default:
             BOOST_ASSERT_MSG(false, "Unknown layer type");
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index dc3c55edac..3e2f298119 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -44,6 +44,7 @@ enum class LayerType
     Softmax,
     SpaceToBatchNd,
     Splitter,
+    StridedSlice,
     // Last layer goes here.
     LastLayer,
     Subtraction = LastLayer,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 78a184a7ce..91aca4ea41 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -403,4 +403,14 @@ bool IsPadSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
 }
 
+bool IsStridedSliceSupported(const BackendId& backend,
+                             const TensorInfo& input,
+                             const TensorInfo& output,
+                             const StridedSliceDescriptor& descriptor,
+                             char* reasonIfUnsupported,
+                             size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
+}
+
 }
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index bd1297b550..0e873d7c70 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -36,6 +36,7 @@
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
 #include "layers/SplitterLayer.hpp"
+#include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
 
 namespace armnn
@@ -95,6 +96,7 @@ DECLARE_LAYER(ResizeBilinear)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
 DECLARE_LAYER(Splitter)
+DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3b3ee3146a..32464f7757 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -694,6 +694,12 @@ IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, cons
     return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
 }
 
+IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+                                                 const char* name)
+{
+    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 95cdb28bfb..471ce3ec68 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -128,6 +128,9 @@ public:
     IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
                                    const char* name = nullptr) override;
 
+    IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+                                            const char* name = nullptr) override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
new file mode 100644
index 0000000000..f5e001c03f
--- /dev/null
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -0,0 +1,58 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "StridedSliceLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+StridedSliceLayer::StridedSliceLayer(const armnn::StridedSliceDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::StridedSlice, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const Graph& graph,
+                                                             const IWorkloadFactory& factory) const
+{
+    StridedSliceQueueDescriptor descriptor;
+
+    descriptor.m_Parameters.m_Begin  = m_Param.m_Begin;
+    descriptor.m_Parameters.m_End    = m_Param.m_End;
+    descriptor.m_Parameters.m_Stride = m_Param.m_Stride;
+
+    // Optional parameters
+    descriptor.m_Parameters.m_BeginMask      = m_Param.m_BeginMask;
+    descriptor.m_Parameters.m_EndMask        = m_Param.m_EndMask;
+    descriptor.m_Parameters.m_EllipsisMask   = m_Param.m_EllipsisMask;
+    descriptor.m_Parameters.m_NewAxisMask    = m_Param.m_NewAxisMask;
+    descriptor.m_Parameters.m_ShrinkAxisMask = m_Param.m_ShrinkAxisMask;
+
+    return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
+{
+    return CloneBase<StridedSliceLayer>(graph, m_Param, GetName());
+}
+
+void StridedSliceLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
new file mode 100644
index 0000000000..33a44243a5
--- /dev/null
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class StridedSliceLayer : public LayerWithParameters<StridedSliceDescriptor>
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    StridedSliceLayer* Clone(Graph& graph) const override;
+
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    StridedSliceLayer(const StridedSliceDescriptor& param, const char* name);
+    ~StridedSliceLayer() = default;
+};
+
+} // namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
index 2cd57b7ad7..dc106e344e 100644
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ b/src/backends/backendsCommon/ILayerSupport.cpp
@@ -279,6 +279,14 @@ bool ILayerSupport::IsSplitterSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool ILayerSupport::IsStridedSliceSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const StridedSliceDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 9fbdfe94c2..e1146543ff 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -924,4 +924,41 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     ValidateSingleOutput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
 }
 
+void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateSingleInput(workloadInfo, "StridedSliceQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "StridedSliceQueueDescriptor");
+
+    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
+    const uint32_t rank = input.GetNumDimensions();
+
+    // Begin, End & Stride length must be of rank(input0)
+    if (m_Parameters.m_Begin.size() != rank)
+    {
+        throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
+                                       + to_string(rank) + ")");
+    }
+
+    if (m_Parameters.m_End.size() != rank)
+    {
+        throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
+                                       + to_string(rank) + ")");
+    }
+
+    if (m_Parameters.m_Stride.size() != rank)
+    {
+        throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
+                                       + to_string(rank) + ")");
+    }
+
+    // Stride entries must be non-zero
+    for (auto& stride : m_Parameters.m_Stride)
+    {
+        if (stride == 0)
+        {
+            throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
+        }
+    }
+}
+
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index d54a71aa8c..8cc60d0a96 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -339,4 +339,10 @@ struct BatchToSpaceNdQueueDescriptor : QueueDescriptorWithParameters<BatchToSpac
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+
+struct StridedSliceQueueDescriptor : QueueDescriptorWithParameters<StridedSliceDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index bb63b336e9..dc38f1a721 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -592,6 +592,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                            reason);
             break;
         }
+        case LayerType::StridedSlice:
+        {
+            auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
+                                                                 OverrideDataType(output, dataType),
+                                                                 cLayer->GetParameters(),
+                                                                 reason);
+            break;
+        }
         case LayerType::Subtraction:
         {
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index cd1ca25bb2..a1d0400f11 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -138,6 +138,9 @@ public:
 
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& Info) const = 0;
+
+    virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& Info) const = 0;
 };
 
 } //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 25079058f6..7817e42321 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -374,6 +374,8 @@ DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
 
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
+DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
+
 DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
 
 
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 1f112008c9..6199648d84 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -314,4 +314,10 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchTo
     return MakeWorkload(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                                 const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index d37a31ffa4..a5560fd01e 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -129,6 +129,9 @@ public:
 
     virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
+    virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const override;
+
 private:
     template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
     static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index ef90240c72..d79373df4e 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -282,4 +282,10 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const Batch
     return MakeWorkloadHelper(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 8d330634cb..8d7b830260 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -130,6 +130,9 @@ public:
 
     virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& Info) const override;
+    virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const override;
+
 private:
     mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
 };
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index afffd65285..6d51b3d039 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -276,4 +276,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchT
     return MakeWorkload(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 91bba84038..af4d98d226 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -146,6 +146,9 @@ public:
 
     virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
+
+    virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const override;
 private:
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
     std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
-- 
cgit v1.2.1
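
For reference, a minimal usage sketch of the API this patch introduces (StridedSliceDescriptor and INetwork::AddStridedSliceLayer), not part of the change itself. The armnn/ArmNN.hpp umbrella header, the 3x2x3 float input shape, the slice bounds and the layer names are illustrative assumptions; because this patch only registers the layer and adds no-op factory entries, running such a network end to end still needs a real StridedSlice workload from a later change.

// Sketch only: builds a network description using the new strided slice API.
#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    // Slice the middle "row" out of each batch entry of a 3x2x3 tensor:
    // equivalent to input[0:3, 1:2, 0:3] with a stride of 1 in every dimension.
    StridedSliceDescriptor sliceDesc(std::vector<int>{0, 1, 0},   // begin
                                     std::vector<int>{3, 2, 3},   // end
                                     std::vector<int>{1, 1, 1});  // stride
    // The mask fields (m_BeginMask, m_EndMask, m_ShrinkAxisMask, ...) default to 0
    // via the descriptor constructor added in this patch.

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* slice  = network->AddStridedSliceLayer(sliceDesc, "strided_slice");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Wire input -> strided slice -> output and describe the tensors on each connection.
    input->GetOutputSlot(0).Connect(slice->GetInputSlot(0));
    slice->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3, 2, 3 }, DataType::Float32));
    slice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3, 1, 3 }, DataType::Float32));

    return 0;
}

Note that the begin, end and stride vectors must match the input rank and use non-zero strides, mirroring the checks StridedSliceQueueDescriptor::Validate performs in the patch above.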