20 template <
typename Functor,
typename ParentDescriptor,
typename armnn::StringMapping::Id DebugString>
22 const ParentDescriptor& desc,
28 template <
typename Functor,
typename ParentDescriptor,
typename armnn::StringMapping::Id DebugString>
34 template <
typename Functor,
typename ParentDescriptor,
typename armnn::StringMapping::Id DebugString>
41 template <
typename Functor,
typename ParentDescriptor,
typename armnn::StringMapping::Id DebugString>
43 std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs)
const 54 std::unique_ptr<Decoder<InType>> input0 = MakeDecoder<InType>(inputInfo0, inputs[0]->Map());
55 std::unique_ptr<Decoder<InType>> input1 = MakeDecoder<InType>(inputInfo1, inputs[1]->Map());
56 std::unique_ptr<Encoder<OutType>> output= MakeEncoder<OutType>(outputInfo, outputs[0]->Map());
81 armnn::SubtractionQueueDescriptor,
82 armnn::StringMapping::RefSubtractionWorkload_Execute>;
89 armnn::MultiplicationQueueDescriptor,
90 armnn::StringMapping::RefMultiplicationWorkload_Execute>;
97 armnn::DivisionQueueDescriptor,
98 armnn::StringMapping::RefDivisionWorkload_Execute>;
105 armnn::MaximumQueueDescriptor,
106 armnn::StringMapping::RefMaximumWorkload_Execute>;
113 armnn::MinimumQueueDescriptor,
114 armnn::StringMapping::RefMinimumWorkload_Execute>;
const TensorShape & GetShape() const
CPU Execution: Reference C++ kernels.
static const StringMapping & Instance()
Copyright (c) 2021 ARM Limited and Contributors.
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
void Execute() const override
RefElementwiseWorkload(const ParentDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
Contains information about TensorInfos of a layer.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers — type aliases instantiating RefElementwiseWorkload for the element-wise reference workloads (subtraction, multiplication, division, maximum, minimum).