ArmNN 22.08
RefReduceWorkload.cpp
//
// Copyright © 2022 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefReduceWorkload.hpp"

#include "Reduce.hpp"
#include "RefWorkloadUtils.hpp"
#include "BaseIterator.hpp"
#include "Profiling.hpp"

namespace armnn
{

RefReduceWorkload::RefReduceWorkload(
    const ReduceQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : RefBaseWorkload<ReduceQueueDescriptor>(descriptor, info) {}

// Synchronous execution path: operates on the tensors held by the workload's queue descriptor.
void RefReduceWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

// Asynchronous execution path: tensors are supplied through the caller's working memory descriptor.
void RefReduceWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefReduceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReduceWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    // Decoder/Encoder map the tensors' underlying data type to and from float for the reference kernel.
    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    Decoder<float>& decoder = *decoderPtr;

    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map());
    Encoder<float>& encoder = *encoderPtr;

    // Run the reference Reduce kernel with the axes and operation from the layer's descriptor.
    Reduce(inputInfo,
           outputInfo,
           decoder,
           encoder,
           m_Data.m_Parameters.m_vAxis,
           m_Data.m_Parameters.m_ReduceOperation);
}

} //namespace armnn
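
For context, here is a minimal sketch of how a Reduce layer might be added to an ArmNN network and optimized for the CpuRef backend, which is the backend that selects reference workloads such as RefReduceWorkload. The tensor shapes, axis choice, layer names and the Sum operation are illustrative assumptions, not taken from this file.

// Illustrative sketch only: shapes, names and option values are assumptions.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Reduce a [1, 2, 3, 4] float tensor over axis 3, keeping the reduced dimension.
    ReduceDescriptor reduceDesc;
    reduceDesc.m_vAxis = { 3 };                           // dimensions to reduce
    reduceDesc.m_ReduceOperation = ReduceOperation::Sum;  // e.g. Sum, Mean, Max, Min
    reduceDesc.m_KeepDims = true;

    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* reduce = network->AddReduceLayer(reduceDesc, "reduce");
    IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(reduce->GetInputSlot(0));
    reduce->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32));
    reduce->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 2, 3, 1 }, DataType::Float32));

    // Optimizing for Compute::CpuRef picks the reference C++ workloads,
    // including RefReduceWorkload for the Reduce layer.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef },
                                           runtime->GetDeviceSpec());
    return 0;
}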
Symbols referenced in this file:

RefReduceWorkload(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info)
void Execute() const override
void ExecuteAsync(ExecutionData &executionData) override
ReduceOperation m_ReduceOperation : specifies the reduction operation to execute.
std::vector<uint32_t> m_vAxis : the indices of the dimensions to reduce.
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
WorkloadInfo : contains information about TensorInfos of a layer.
const TensorInfo& GetTensorInfo(const ITensorHandle *tensorHandle)
ARMNN_SCOPED_PROFILING_EVENT(backendId, name) : defined in Profiling.hpp.
Compute::CpuRef : CPU execution with reference C++ kernels.
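
As a small illustration of what m_vAxis and m_ReduceOperation describe (this is plain C++, not the library's Reduce implementation in Reduce.hpp), the following snippet sums a 2x3 array over axis 1, collapsing each row to a single value.

// Illustrative only: the semantics of a Sum reduction over one axis.
#include <array>
#include <cstdio>

int main()
{
    std::array<std::array<float, 3>, 2> input = {{ { 1.f, 2.f, 3.f }, { 4.f, 5.f, 6.f } }};
    std::array<float, 2> output = { 0.f, 0.f };

    for (size_t row = 0; row < input.size(); ++row)
    {
        for (float value : input[row])
        {
            output[row] += value;   // analogue of ReduceOperation::Sum over axis 1
        }
    }

    std::printf("%.1f %.1f\n", output[0], output[1]); // prints: 6.0 15.0
    return 0;
}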