Diffstat (limited to 'src/backends/neon/workloads/NeonStackWorkload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonStackWorkload.cpp  76
1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
new file mode 100644
index 0000000000..b21494397d
--- /dev/null
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "NeonStackWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+#include <boost/numeric/conversion/cast.hpp>
+#include <boost/polymorphic_pointer_cast.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+namespace
+{
+int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
+{
+    const int intAxis = boost::numeric_cast<int>(axis);
+    return boost::numeric_cast<int>(inputDimensions) - intAxis;
+}
+} //namespace
+
+arm_compute::Status NeonStackWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                              const TensorInfo& output,
+                                              const StackDescriptor& descriptor)
+{
+    std::vector<arm_compute::TensorInfo> aclInputs;
+    for (const TensorInfo* input : inputs)
+    {
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
+    }
+
+    std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
+    for (arm_compute::ITensorInfo& input : aclInputs)
+    {
+        aclInputPtrs.emplace_back(&input);
+    }
+
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+    int aclAxis = CalcAxis(descriptor.m_Axis, descriptor.m_InputShape.GetNumDimensions());
+    return arm_compute::NEStackLayer::validate(aclInputPtrs, aclAxis, &aclOutputInfo);
+}
+
+NeonStackWorkload::NeonStackWorkload(const StackQueueDescriptor& descriptor, const WorkloadInfo& info)
+: BaseWorkload<StackQueueDescriptor>(descriptor, info)
+{
+    std::vector<arm_compute::ITensor*> aclInputs;
+    for (auto input : m_Data.m_Inputs)
+    {
+        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
+        aclInputs.emplace_back(&aclInput);
+    }
+    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+        m_Data.m_Outputs[0])->GetTensor();
+
+    m_Layer.reset(new arm_compute::NEStackLayer());
+    int aclAxis = CalcAxis(descriptor.m_Parameters.m_Axis, descriptor.m_Parameters.m_InputShape.GetNumDimensions());
+    m_Layer->configure(aclInputs, aclAxis, &output);
+}
+
+void NeonStackWorkload::Execute() const
+{
+    if (m_Layer)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonStackWorkload_Execute");
+        m_Layer->run();
+    }
+}
+
+} //namespace armnn
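
Note (not part of the patch): CalcAxis converts the Arm NN stack axis into the axis index passed to arm_compute::NEStackLayer. Arm NN counts the axis from the outermost dimension of the input shape, while ACL indexes dimensions in reverse order on the stacked output, which has one more dimension than each input; presumably that is why axis a over n-dimensional inputs maps to (n + 1) - 1 - a = n - a. Below is a minimal standalone sketch of the same arithmetic, using static_cast instead of the patch's boost::numeric_cast; the function name and the values in main() are illustrative assumptions, not code from the repository.

    // Standalone sketch of the CalcAxis conversion (illustrative only).
    #include <cassert>

    int CalcAxisSketch(unsigned int axis, unsigned int inputDimensions)
    {
        // The stacked output has inputDimensions + 1 dimensions; ACL indexes them in
        // reverse order, so Arm NN axis a becomes (inputDimensions + 1) - 1 - a.
        return static_cast<int>(inputDimensions) - static_cast<int>(axis);
    }

    int main()
    {
        // Stacking 3-D inputs, e.g. shape [2, 3, 4]:
        assert(CalcAxisSketch(0, 3) == 3); // outermost Arm NN axis -> highest ACL index
        assert(CalcAxisSketch(3, 3) == 0); // innermost stacking position -> ACL index 0
        return 0;
    }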