ArmNN 20.11
RefElementwiseWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefElementwiseWorkload.hpp"

#include "Decoders.hpp"
#include "ElementwiseFunction.hpp"
#include "Encoders.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
#include "StringMapping.hpp"
#include <ResolveType.hpp>
#include <vector>

namespace armnn
{

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::RefElementwiseWorkload(
    const ParentDescriptor& desc,
    const WorkloadInfo& info)
    : BaseWorkload<ParentDescriptor>(desc, info)
{
}

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::PostAllocationConfigure()
{
    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    // Create typed decoders for both inputs and an encoder for the output,
    // chosen from the tensors' data types.
    m_Input0 = MakeDecoder<InType>(inputInfo0);
    m_Input1 = MakeDecoder<InType>(inputInfo1);
    m_Output = MakeEncoder<OutType>(outputInfo);
}

template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, StringMapping::Instance().Get(DebugString));
    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    // Bind the decoders and the encoder to the mapped tensor memory for this run.
    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
    m_Output->Reset(m_Data.m_Outputs[0]->Map());

    // Apply the binary Functor element by element, broadcasting the two input
    // shapes to the output shape.
    ElementwiseBinaryFunction<Functor>(inShape0,
                                       inShape1,
                                       outShape,
                                       *m_Input0,
                                       *m_Input1,
                                       *m_Output);
}

} //namespace armnn
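
// Explicit instantiations for every Functor / descriptor / debug-string
// combination used by the reference backend. The member definitions above
// live in this .cpp file, so each combination must be instantiated here.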

template class armnn::RefElementwiseWorkload<std::plus<float>,
                                             armnn::AdditionQueueDescriptor,
                                             armnn::StringMapping::RefAdditionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::plus<int32_t>,
                                             armnn::AdditionQueueDescriptor,
                                             armnn::StringMapping::RefAdditionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::minus<float>,
                                             armnn::SubtractionQueueDescriptor,
                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::minus<int32_t>,
                                             armnn::SubtractionQueueDescriptor,
                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::multiplies<float>,
                                             armnn::MultiplicationQueueDescriptor,
                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::multiplies<int32_t>,
                                             armnn::MultiplicationQueueDescriptor,
                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::divides<float>,
                                             armnn::DivisionQueueDescriptor,
                                             armnn::StringMapping::RefDivisionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<std::divides<int32_t>,
                                             armnn::DivisionQueueDescriptor,
                                             armnn::StringMapping::RefDivisionWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::maximum<float>,
                                             armnn::MaximumQueueDescriptor,
                                             armnn::StringMapping::RefMaximumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::maximum<int32_t>,
                                             armnn::MaximumQueueDescriptor,
                                             armnn::StringMapping::RefMaximumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::minimum<float>,
                                             armnn::MinimumQueueDescriptor,
                                             armnn::StringMapping::RefMinimumWorkload_Execute>;

template class armnn::RefElementwiseWorkload<armnn::minimum<int32_t>,
                                             armnn::MinimumQueueDescriptor,
                                             armnn::StringMapping::RefMinimumWorkload_Execute>;
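
For context, the sketch below is a simplified, standalone illustration of what one of these workloads computes. The real ElementwiseBinaryFunction in ElementwiseFunction.hpp walks the two input shapes with full broadcasting and reads and writes through the Decoder/Encoder abstractions; this sketch only handles equal-length float buffers. The functor names mirror the instantiations above, while ElementwiseApply and the variable names are illustrative only, not ArmNN API.

#include <algorithm>
#include <cassert>
#include <functional>
#include <iostream>
#include <vector>

// Simplified stand-ins for armnn::maximum / armnn::minimum.
template <typename T> struct maximum { T operator()(T a, T b) const { return std::max(a, b); } };
template <typename T> struct minimum { T operator()(T a, T b) const { return std::min(a, b); } };

// Minimal analogue of ElementwiseBinaryFunction: apply Functor element by
// element. No broadcasting and no decoders/encoders; equal-sized inputs only.
template <typename Functor>
std::vector<float> ElementwiseApply(const std::vector<float>& in0, const std::vector<float>& in1)
{
    assert(in0.size() == in1.size());
    std::vector<float> out(in0.size());
    Functor f;
    for (std::size_t i = 0; i < in0.size(); ++i)
    {
        out[i] = f(in0[i], in1[i]);
    }
    return out;
}

int main()
{
    const std::vector<float> a = { 1.0f, 2.0f, 3.0f };
    const std::vector<float> b = { 4.0f, 0.5f, 3.0f };

    // The same driver code serves every operation; only the functor changes,
    // just as RefElementwiseWorkload varies only in its Functor parameter.
    for (float v : ElementwiseApply<std::plus<float>>(a, b)) { std::cout << v << ' '; }   // 5 2.5 6
    std::cout << '\n';
    for (float v : ElementwiseApply<maximum<float>>(a, b))   { std::cout << v << ' '; }   // 4 2 3
    std::cout << '\n';
}

Parameterising the workload on the functor is the design point to note: one templated implementation covers addition, subtraction, multiplication, division, maximum and minimum, and each operation only pays the cost of an explicit instantiation at the bottom of the file.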