35 template<
typename FactoryType>
36 void SplitterLayer::CreateTensors(
const FactoryType& factory)
40 bool useSubTensors = factory.SupportsSubTensors();
51 std::vector<std::unique_ptr<ITensorHandle>> subTensors;
60 auto CreateSubTensor = [&]()
66 return factory.CreateSubTensorHandle(*inputData,
70 return std::unique_ptr<ITensorHandle>();
73 auto subTensor = CreateSubTensor();
76 useSubTensors =
false;
79 subTensors.push_back(std::move(subTensor));
85 for (
auto& subTensor : subTensors)
105 const bool IsMemoryManaged)
107 boost::ignore_unused(IsMemoryManaged);
113 CreateTensors(workloadFactory);
118 BOOST_ASSERT(handleFactory);
119 CreateTensors(*handleFactory);
130 boost::ignore_unused(inputShapes);
132 std::vector<TensorShape> outShapes;
144 std::vector<TensorShape> views;
157 ConditionalThrowIfNotEqual<LayerValidationException>(
158 "SplitterLayer: View sizes must match output tensor shapes.",
160 inferredShapes[viewIdx]);
void ValidateTensorShapesFromInputs() override
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
void Accept(ILayerVisitor &visitor) const override
const char * GetName() const override
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
SplitterLayer * Clone(Graph &graph) const override
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
static const FactoryId LegacyFactoryId
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
const std::vector< InputSlot > & GetInputSlots() const
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
uint32_t GetNumDimensions() const
Get the number of dimensions.
This layer represents a split operation.
const ViewsDescriptor & GetParameters() const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
ViewsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
ClWorkloadFactory FactoryType
void Splitter(const SplitterQueueDescriptor &data)
std::vector< OutputHandler > m_OutputHandlers
ITensorHandle * GetData() const
Gets the allocated tensor memory.
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
std::vector< ViewOrigin > m_ViewOrigins
SplitterLayer(const ViewsDescriptor &param, const char *name)
const TensorShape & GetShape() const
const TensorInfo & GetTensorInfo() const override
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
virtual void VisitSplitterLayer(const IConnectableLayer *layer, const ViewsDescriptor &splitterDescriptor, const char *name=nullptr)=0
uint32_t GetNumViews() const
Get the number of views.