ArmNN 21.11
RefTransposeWorkload.cpp
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefTransposeWorkload.hpp"
#include "RefWorkloadUtils.hpp"

#include <armnnUtils/Transpose.hpp>

#include <ResolveType.hpp>

namespace armnn
{

// Synchronous execution: forward the tensors recorded in the queue descriptor.
template <armnn::DataType DataType>
void RefTransposeWorkload<DataType>::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

// Asynchronous execution: take the tensors from the caller-supplied working memory descriptor.
template <armnn::DataType DataType>
void RefTransposeWorkload<DataType>::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

template <armnn::DataType DataType>
void RefTransposeWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs,
                                             std::vector<ITensorHandle*> outputs) const
{
    using T = ResolveType<DataType>;

    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");

    const ITensorHandle* src = inputs[0];
    ITensorHandle* dst = outputs[0];
    const PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;

    // Copy the input elements into the output buffer, rearranged according to
    // the dimension mappings from the transpose descriptor.
    armnnUtils::Transpose(GetTensorInfo(src).GetShape(), mappings, src->Map(), dst->Map(), sizeof(T));
}

// Explicit instantiations for the tensor data types this workload supports.
template class RefTransposeWorkload<DataType::BFloat16>;
template class RefTransposeWorkload<DataType::Float16>;
template class RefTransposeWorkload<DataType::Float32>;
template class RefTransposeWorkload<DataType::QAsymmS8>;
template class RefTransposeWorkload<DataType::QAsymmU8>;
template class RefTransposeWorkload<DataType::QSymmS16>;

} //namespace armnn
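
For illustration, the standalone sketch below makes the same armnnUtils::Transpose call that Execute issues above, but on a small local buffer. It is not part of RefTransposeWorkload.cpp: the include paths, the initializer-list constructors of TensorShape and PermutationVector, and the example values are assumptions for this sketch, which simply mirrors the argument order used by the workload (input shape, dimension mappings, mapped buffers, element size).

// Illustrative sketch (not from the original file); include paths are assumed.
#include <armnn/Tensor.hpp>          // armnn::TensorShape
#include <armnn/Types.hpp>           // armnn::PermutationVector
#include <armnnUtils/Transpose.hpp>  // armnnUtils::Transpose

#include <vector>

int main()
{
    // 2x3 source tensor stored row-major: { 1 2 3 / 4 5 6 }.
    const armnn::TensorShape srcShape({ 2, 3 });
    const std::vector<float> src = { 1, 2, 3, 4, 5, 6 };
    std::vector<float> dst(src.size());

    // Swap the two dimensions, i.e. a plain 2-D transpose.
    const armnn::PermutationVector mappings({ 1, 0 });

    // Same argument order as in RefTransposeWorkload::Execute:
    // source shape, dimension mappings, raw buffers, element size.
    armnnUtils::Transpose(srcShape, mappings, src.data(), dst.data(), sizeof(float));

    // dst now holds the 3x2 transpose in row-major order: { 1 4 2 5 3 6 }.
    return 0;
}

The workload performs the same operation, except that the shape comes from GetTensorInfo on the input handle, the mappings come from m_Data.m_Parameters.m_DimMappings, and the raw pointers come from Map() on the input and output ITensorHandles.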