20 using namespace armcomputetensorutils;
/// Translate an armnn split axis into the equivalent Arm Compute Library axis.
/// armnn indexes dimensions outermost-first, while arm_compute indexes them
/// innermost-first, so the axis index has to be mirrored.
/// @param numDimensions total number of dimensions of the tensor being split
/// @param splitAxis     armnn axis index (0 == outermost dimension)
/// @return the corresponding arm_compute axis index
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
{
    return (numDimensions - splitAxis) - 1;
}
32 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
33 unsigned int splitAxis)
35 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
37 size_t numOutputs = outputs.size();
39 std::vector<arm_compute::TensorInfo> aclOutputs;
40 aclOutputs.reserve(numOutputs);
42 std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
43 aclOutputPtr.reserve(numOutputs);
45 for (
size_t i = 0u; i < outputs.size(); ++i)
47 aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
48 aclOutputPtr.emplace_back(&aclOutputs.back());
52 return arm_compute::CLSplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
// NOTE(review): the lines below are a NON-CONTIGUOUS extraction of the
// ClSplitterWorkload constructor — the embedded numbers are the original
// source line numbers, and many statements between them are missing (the
// constructor's opening signature, loop headers, else-branches, and the
// splitAxis computation are not visible here). Comments describe only what
// the visible fragments show; do not treat this span as compilable code.
57 const arm_compute::CLCompileContext&)
// Visible: a flag tracking whether every output handle is a subtensor of the
// input; the CLSplit function below is only configured in that case.
65 bool allOutputsAreSubtensors =
true;
// An output handle with no parent is not a subtensor, so clear the flag.
// (Presumably iterated over m_Data.m_Outputs — the loop header is missing.)
70 if (output && !output->GetParent())
73 allOutputsAreSubtensors =
false;
78 if (allOutputsAreSubtensors)
// Downcast the input ITensorHandle to the CL handle type to reach the
// underlying arm_compute::ICLTensor. (Right-hand side continues on a
// missing line.)
84 arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
// Collect one ICLTensor pointer per output for CLSplit::configure.
87 std::vector<arm_compute::ICLTensor *> aclOutputs;
90 arm_compute::ICLTensor& aclOutput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor();
91 aclOutputs.emplace_back(&aclOutput);
// CLSplit supports exactly one split axis; the branch body (presumably an
// error path) is missing from this extraction.
98 if (splitAxis.size() != 1)
// Configure the ACL split function; aclAxis is computed on a missing line,
// presumably via CalcAclAxis as in the validate function — confirm upstream.
104 auto layer = std::make_unique<arm_compute::CLSplit>();
107 layer->configure(&input, aclOutputs, aclAxis);
// Keep ownership of the configured function; Execute() runs it later.
113 m_Layer = std::move(layer);
#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid)
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
uint32_t GetNumDimensions() const
Get the number of dimensions.
Copyright (c) 2021 ARM Limited and Contributors.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
SplitterQueueDescriptor m_Data
profiling::ProfilingGuid GetGuid() const final
std::vector< ITensorHandle * > m_Outputs
void Execute() const override
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
unsigned int GetNumDimensions() const
ClSplitterWorkload(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)