Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt          |   1
-rw-r--r--  src/backends/cl/workloads/ClSplitterWorkload.cpp  | 117
-rw-r--r--  src/backends/cl/workloads/ClSplitterWorkload.hpp  |  20
3 files changed, 132 insertions, 6 deletions
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index abbed0e754..a3eedd028e 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -64,6 +64,7 @@ list(APPEND armnnClBackendWorkloads_sources
     ClSoftmaxUint8Workload.hpp
     ClSpaceToBatchNdWorkload.hpp
     ClSpaceToBatchNdWorkload.cpp
+    ClSplitterWorkload.cpp
     ClSplitterWorkload.hpp
     ClStridedSliceWorkload.cpp
     ClStridedSliceWorkload.hpp
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
new file mode 100644
index 0000000000..9bbbcab797
--- /dev/null
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClSplitterWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+namespace
+{
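+    // ArmNN indexes tensor dimensions from the outermost one (e.g. N in NCHW),
+    // whereas ACL indexes from the innermost, so the split axis is reversed here.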
+ unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
+ {
+ return (numDimensions - splitAxis) - 1;
+ }
+
+} //namespace
+
+arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ unsigned int splitAxis)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+
+ size_t numOutputs = outputs.size();
+
+ std::vector<arm_compute::TensorInfo> aclOutputs;
+ aclOutputs.reserve(numOutputs);
+
+ std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
+ aclOutputPtr.reserve(numOutputs);
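+    // reserve() keeps the emplace_back calls below from reallocating, which
+    // would invalidate the raw pointers collected in aclOutputPtr.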
+
+    for (size_t i = 0u; i < numOutputs; ++i)
+ {
+ aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
+ aclOutputPtr.emplace_back(&aclOutputs.back());
+ }
+
+ unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
+ return arm_compute::CLSplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
+}
+
+ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
+{
+ bool allOutputsAreSubtensors = true;
+
+ // Check that all outputs are sub-tensors
+ for (auto output : m_Data.m_Outputs)
+ {
+ if (output && !output->GetParent())
+ {
+            // A non sub-tensor output was found, so the split function needs to run
+ allOutputsAreSubtensors = false;
+ break;
+ }
+ }
+
+ if (allOutputsAreSubtensors)
+ {
+ // Can skip configuring the split function since it's not executed
+ return;
+ }
+
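+    // Extract the underlying arm_compute tensors from the ArmNN tensor handles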
+ arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ m_Data.m_Inputs[0])->GetTensor();
+
+ std::vector<arm_compute::ICLTensor *> aclOutputs;
+ for (auto output : m_Data.m_Outputs)
+ {
+ arm_compute::ICLTensor& aclOutput = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor();
+ aclOutputs.emplace_back(&aclOutput);
+ }
+
+ // Create the layer function
+    m_Layer = std::make_unique<arm_compute::CLSplit>();
+
+    // Derive the split axis from the descriptor; CLSplit can only split along a single axis
+ std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
+ if (splitAxis.size() != 1)
+ {
+ throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
+ }
+
+ unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
+ m_Layer->configure(&input, aclOutputs, aclAxis);
+
+    // Run the function's one-off preparation work ahead of the first Execute()
+ m_Layer->prepare();
+}
+
+void ClSplitterWorkload::Execute() const
+{
+ if (m_Layer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterWorkload_Execute");
+ m_Layer->run();
+ }
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.hpp b/src/backends/cl/workloads/ClSplitterWorkload.hpp
index 950335902e..d024452d78 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.hpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.hpp
@@ -7,19 +7,27 @@
 
 #include <backendsCommon/Workload.hpp>
 
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <functional>
+
 namespace armnn
 {
 
-// Base class template providing an implementation of the Splitter layer common to all data types.
+arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ unsigned int splitAxis);
+
 class ClSplitterWorkload : public BaseWorkload<SplitterQueueDescriptor>
 {
 public:
- using BaseWorkload<SplitterQueueDescriptor>::BaseWorkload;
+ ClSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
 
-    void Execute() const override
- {
- // With subtensors, splitter is a no-op.
- }
+private:
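+    // Only created when the outputs are not all sub-tensors; Execute() is a no-op otherwise.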
+ mutable std::unique_ptr<arm_compute::CLSplit> m_Layer;
 };
 
 } //namespace armnn