//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefElementwiseWorkload.hpp"

#include "Decoders.hpp"
#include "ElementwiseFunction.hpp"
#include "Encoders.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
#include "StringMapping.hpp"

#include <functional>
#include <vector>

namespace armnn
{

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::RefElementwiseWorkload(
    const ParentDescriptor& desc,
    const WorkloadInfo& info)
    : RefBaseWorkload<ParentDescriptor>(desc, info)
{
}

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::ExecuteAsync(
    WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute(
    std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, StringMapping::Instance().Get(DebugString));

    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    // Decode both input tensors, apply the binary functor element-wise (broadcasting is
    // handled inside ElementwiseBinaryFunction) and encode the result into the output tensor.
    std::unique_ptr<Decoder<InType>>  input0 = MakeDecoder<InType>(inputInfo0, inputs[0]->Map());
    std::unique_ptr<Decoder<InType>>  input1 = MakeDecoder<InType>(inputInfo1, inputs[1]->Map());
    std::unique_ptr<Encoder<OutType>> output = MakeEncoder<OutType>(outputInfo, outputs[0]->Map());

    ElementwiseBinaryFunction<Functor>(inShape0, inShape1, outShape, *input0, *input1, *output);
}

} //namespace armnn

// Explicit instantiations for the elementwise workloads, one float and one int32_t
// variant per arithmetic operation.
template class armnn::RefElementwiseWorkload<std::plus<float>,
                                             armnn::AdditionQueueDescriptor,
                                             armnn::StringMapping::RefAdditionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::plus<int32_t>,
                                             armnn::AdditionQueueDescriptor,
                                             armnn::StringMapping::RefAdditionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::minus<float>,
                                             armnn::SubtractionQueueDescriptor,
                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::minus<int32_t>,
                                             armnn::SubtractionQueueDescriptor,
                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::multiplies<float>,
                                             armnn::MultiplicationQueueDescriptor,
                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::multiplies<int32_t>,
                                             armnn::MultiplicationQueueDescriptor,
                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::divides<float>,
                                             armnn::DivisionQueueDescriptor,
                                             armnn::StringMapping::RefDivisionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::divides<int32_t>,
                                             armnn::DivisionQueueDescriptor,
                                             armnn::StringMapping::RefDivisionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::maximum<float>,
                                             armnn::MaximumQueueDescriptor,
                                             armnn::StringMapping::RefMaximumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::maximum<int32_t>,
                                             armnn::MaximumQueueDescriptor,
                                             armnn::StringMapping::RefMaximumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::minimum<float>,
                                             armnn::MinimumQueueDescriptor,
                                             armnn::StringMapping::RefMinimumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::minimum<int32_t>,
                                             armnn::MinimumQueueDescriptor,
                                             armnn::StringMapping::RefMinimumWorkload_Execute>;