//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefStackWorkload.hpp"

#include "RefWorkloadUtils.hpp"
#include "Stack.hpp"

#include <Profiling.hpp>

namespace armnn
{

RefStackWorkload::RefStackWorkload(const StackQueueDescriptor& descriptor,
                                   const WorkloadInfo& info)
    : BaseWorkload<StackQueueDescriptor>(descriptor, info)
{}

void RefStackWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefStackWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

void RefStackWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStackWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    // Create a float decoder for each input tensor so the reference Stack
    // implementation can read the values independently of the underlying data type.
    std::vector<std::unique_ptr<Decoder<float>>> inputDecoders;
    for (unsigned int i = 0; i < inputs.size(); ++i)
    {
        inputDecoders.push_back(MakeDecoder<float>(GetTensorInfo(inputs[i]),
                                                   inputs[i]->Map()));
    }

    // Encode the stacked result into the output tensor.
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
                                                                       outputs[0]->Map());

    Stack(m_Data, inputDecoders, *outputEncoder, inputInfo, outputInfo);
}

} // namespace armnn