ArmNN 22.08 — source listing of RefL2NormalizationWorkload.cpp (reference-backend L2 normalization workload).
(Doxygen navigation: "Go to the documentation of this file.")
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "RefWorkloadUtils.hpp"
8 #include "Decoders.hpp"
9 #include "Encoders.hpp"
10 
11 #include <Profiling.hpp>
12 
15 
16 #include <cmath>
17 
18 using namespace armnnUtils;
19 
20 namespace armnn
21 {
23  const L2NormalizationQueueDescriptor& descriptor,
24  const WorkloadInfo& info)
25  : RefBaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}
26 
28 {
30 }
31 
33 {
34  WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
35  Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
36 }
37 
38 void RefL2NormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
39 {
40  ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
41 
42  const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
43  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
44 
45  auto inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
46  auto outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
47 
49 
50  const TensorShape& shape = inputInfo.GetShape();
51  unsigned int paddedShapeArray[4];
52  const int idxShift = 4 - armnn::numeric_cast<int>(shape.GetNumDimensions());
53 
54  const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
55  paddedShapeArray[0] = batches;
56 
57  const int channelsIdx = armnn::numeric_cast<int>(dataLayout.GetChannelsIndex());
58  const unsigned int channels = (channelsIdx - idxShift >= 0)
59  ? shape[armnn::numeric_cast<unsigned int>(channelsIdx - idxShift)]
60  : 1;
61  paddedShapeArray[channelsIdx] = channels;
62 
63  const int heightIdx = armnn::numeric_cast<int>(dataLayout.GetHeightIndex());
64  const unsigned int height = (heightIdx - idxShift >= 0)
65  ? shape[armnn::numeric_cast<unsigned int>(heightIdx - idxShift)]
66  : 1;
67  paddedShapeArray[heightIdx] = height;
68 
69  const int widthIdx = armnn::numeric_cast<int>(dataLayout.GetWidthIndex());
70  const unsigned int width = (widthIdx - idxShift >= 0)
71  ? shape[armnn::numeric_cast<unsigned int>(widthIdx - idxShift)]
72  : 1;
73  paddedShapeArray[widthIdx] = width;
74 
75  const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);
76 
77  for (unsigned int n = 0; n < batches; ++n)
78  {
79  for (unsigned int c = 0; c < channels; ++c)
80  {
81  for (unsigned int h = 0; h < height; ++h)
82  {
83  for (unsigned int w = 0; w < width; ++w)
84  {
85  float reduction = 0.0;
86  for (unsigned int d = 0; d < channels; ++d)
87  {
88  unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
89 
90  (*inputDecoder)[inputIndex];
91  const float value = inputDecoder->Get();
92  reduction += value * value;
93  }
94 
95  unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
96 
97  float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;
98 
99  const float scale = 1.0f / sqrtf(maximum);
100 
101  (*inputDecoder)[index];
102  (*outputEncoder)[index];
103  outputEncoder->Set(inputDecoder->Get() * scale);
104  }
105  }
106  }
107  }
108 }
109 
110 } //namespace armnn
float m_Eps
Used to avoid dividing by zero.
CPU Execution: Reference C++ kernels.
void ExecuteAsync(ExecutionData &executionData) override
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
std::vector< ITensorHandle * > m_Outputs
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
RefL2NormalizationWorkload(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers