ArmNN 23.05
RefGatherNdWorkload.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefGatherNdWorkload.hpp"

#include "Gather.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
#include "backendsCommon/WorkloadUtils.hpp"

namespace armnn
{

void RefGatherNdWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefGatherNdWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefGatherNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefGatherNdWorkload_Execute");

    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    std::unique_ptr<Decoder<float>> params_decoderPtr = MakeDecoder<float>(inputInfo0, inputs[0]->Map());

    const int32_t* indicesDataPtr = reinterpret_cast<int32_t*>(inputs[1]->Map());
    std::vector<int32_t> indices(indicesDataPtr, indicesDataPtr + inputInfo1.GetNumElements());

    std::unique_ptr<Encoder<float>> output_encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map());

    std::map<std::string, unsigned int> keyIndices = CalculateGatherNdKeyIndices(inputInfo0, inputInfo1);
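    // Illustrative example (assumed shapes, not taken from a real network): for params of
    // shape {5, 5, 5} and indices of shape {3, 2}, the keys would be ND = 2 (length of each
    // index tuple), W = 3 (number of index tuples), K = 25 (product of the ND outermost
    // params dimensions), C = 5 (product of the remaining dimensions) and N = 1.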

    /// Calculate flattened indices: flattenedIndices = indices * flattenedCoefficients
    // Calculate the flattened coefficients to use in the multiplication
    // to calculate the flattened indices needed by gather
    TensorShape paramsShape = inputInfo0.GetShape();
    std::vector<unsigned int> flattenedCoeff(keyIndices["ND"], 1);
    for (unsigned int i = 1; i < keyIndices["ND"]; ++i)
    {
        flattenedCoeff[i-1] = paramsShape[i];
    }
    for (unsigned int i = keyIndices["ND"]-1; i > 0; --i)
    {
        flattenedCoeff[i-1] *= flattenedCoeff[i];
    }
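    // Continuing the illustrative {5, 5, 5} params example: with ND = 2 the two loops above
    // give flattenedCoeff = {5, 1}, and with ND = 3 they give {25, 5, 1}, i.e. the row-major
    // strides of the ND outermost params dimensions.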

    // Prepare the vector to store the output of the matrix multiplication,
    // which will represent the flattened indices needed by gather
    armnn::TensorInfo flattenedIndices_Info = inputInfo1;
    flattenedIndices_Info.SetShape({ keyIndices["W"] });
    std::vector<int32_t> flattenedIndices(flattenedIndices_Info.GetNumElements(), 0);

    // Multiplication to calculate the flattened indices, which are the indices needed by gather.
    for (unsigned int i = 0; i < keyIndices["W"]; ++i)
    {
        for (unsigned int j = 0; j < keyIndices["ND"]; ++j)
        {
            flattenedIndices[i] += indices[i * keyIndices["ND"] + j] * static_cast<int32_t>(flattenedCoeff[j]);
        }
    }
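    // Continuing the illustrative example: with flattenedCoeff = {5, 1}, the index tuple
    // {1, 2} maps to flattened row 1 * 5 + 2 = 7 of params viewed as a {K, C} matrix.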

    /// Call Gather with adequate shapes
    // Reshape params into {K, C}
    armnn::TensorInfo params_K_C_Info = inputInfo0;
    params_K_C_Info.SetShape({ keyIndices["K"], keyIndices["C"] });

    // Reshape indices into {N, W}
    armnn::TensorInfo indices_N_W_Info = inputInfo1;
    indices_N_W_Info.SetShape({ keyIndices["N"], keyIndices["W"] });

    // Reshape output to have the shape given by gather {N, W, C}
    // (the original outputInfo has the shape given by gatherNd)
    armnn::TensorInfo outputGather_Info = outputInfo;
    outputGather_Info.SetShape({ keyIndices["N"], keyIndices["W"], keyIndices["C"] });
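    // Continuing the illustrative example (params {5, 5, 5}, indices {3, 2}): params_K_C is
    // {25, 5}, indices_N_W is {1, 3} and outputGather is {1, 3, 5}, while the workload's
    // output tensor keeps the GatherNd shape {3, 5}, which holds the same number of elements.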

    // output_gather = gather(params_K_C, indices_N_W)
    Gather(params_K_C_Info, indices_N_W_Info, outputGather_Info,
           *params_decoderPtr, flattenedIndices.data(), *output_encoderPtr, 0);
}

} // namespace armnn
Referenced symbols:

armnn::GetTensorInfo
    const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
    float32 helpers
    Definition: RefWorkloadUtils.hpp:27
armnn::CalculateGatherNdKeyIndices
    std::map<std::string, unsigned int> CalculateGatherNdKeyIndices(TensorInfo inputInfo0, TensorInfo inputInfo1)
    Calculates the key index values needed for GatherNd: N, ND, K, W, C (N is always 1).
    Definition: WorkloadUtils.cpp:300
armnn::RefGatherNdWorkload::ExecuteAsync
    void ExecuteAsync(ExecutionData& executionData) override
    Definition: RefGatherNdWorkload.cpp:21
armnn::LayerType::Map
    @ Map
armnn::experimental::WorkingMemDescriptor::m_Inputs
    std::vector<ITensorHandle*> m_Inputs
    Definition: WorkingMemDescriptor.hpp:20
armnn::experimental::ExecutionData
    Definition: ExecutionData.hpp:14
armnn::experimental::ExecutionData::m_Data
    void* m_Data
    Definition: ExecutionData.hpp:16
armnn::experimental::WorkingMemDescriptor
    Definition: WorkingMemDescriptor.hpp:18
armnn::BaseWorkload<GatherNdQueueDescriptor>::m_Data
    GatherNdQueueDescriptor m_Data
    Definition: Workload.hpp:83
armnn
    Copyright (c) 2021 ARM Limited and Contributors.
    Definition: 01_00_quick_start.dox:6
armnn::TensorShape
    Definition: Tensor.hpp:20
RefWorkloadUtils.hpp
ARMNN_SCOPED_PROFILING_EVENT
    #define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
    Definition: Profiling.hpp:220
armnn::TensorInfo::GetNumElements
    unsigned int GetNumElements() const
    Definition: Tensor.hpp:196
armnn::TensorInfo
    Definition: Tensor.hpp:152
armnn::Gather
    void Gather(const TensorInfo& paramsInfo, const TensorInfo& indicesInfo, const TensorInfo& outputInfo, Decoder<float>& params, const int32_t* indices, Encoder<float>& output, const int32_t axis_int)
    Definition: Gather.cpp:14
armnn::TensorInfo::GetShape
    const TensorShape& GetShape() const
    Definition: Tensor.hpp:191
armnn::RefGatherNdWorkload::Execute
    void Execute() const override
    Definition: RefGatherNdWorkload.cpp:16
armnn::experimental::WorkingMemDescriptor::m_Outputs
    std::vector<ITensorHandle*> m_Outputs
    Definition: WorkingMemDescriptor.hpp:21
Gather.hpp
armnn::QueueDescriptor::m_Outputs
    std::vector<ITensorHandle*> m_Outputs
    Definition: WorkloadData.hpp:27
armnn::TensorInfo::SetShape
    void SetShape(const TensorShape& newShape)
    Definition: Tensor.hpp:193
WorkloadUtils.hpp
RefGatherNdWorkload.hpp
armnn::Compute::CpuRef
    @ CpuRef
    CPU Execution: Reference C++ kernels.
armnn::QueueDescriptor::m_Inputs
    std::vector<ITensorHandle*> m_Inputs
    Definition: WorkloadData.hpp:26
Profiling.hpp