ArmNN 20.05
RefL2NormalizationWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefL2NormalizationWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"

#include <Profiling.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <cmath>

using namespace armnnUtils;

namespace armnn
{
RefL2NormalizationWorkload::RefL2NormalizationWorkload(
    const L2NormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}

void RefL2NormalizationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    auto inputDecoder  = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());

    DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);

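    // Pad the input shape out to four dimensions so that DataLayoutIndexed::GetIndex() can be
    // used uniformly below; dimensions that are not present in the original shape are treated
    // as having size 1.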
    const TensorShape& shape = inputInfo.GetShape();
    unsigned int paddedShapeArray[4];
    const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());

    const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
    paddedShapeArray[0] = batches;

    const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
    const unsigned int channels = (channelsIdx - idxShift >= 0)
                                  ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
                                  : 1;
    paddedShapeArray[channelsIdx] = channels;

    const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
    const unsigned int height = (heightIdx - idxShift >= 0)
                                ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
                                : 1;
    paddedShapeArray[heightIdx] = height;

    const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
    const unsigned int width = (widthIdx - idxShift >= 0)
                               ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
                               : 1;
    paddedShapeArray[widthIdx] = width;

    const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);

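    // For each (batch, height, width) position: accumulate the sum of squares over the channel
    // dimension, clamp it from below by m_Eps, and write out each channel element scaled by the
    // reciprocal square root of that value.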
    for (unsigned int n = 0; n < batches; ++n)
    {
        for (unsigned int c = 0; c < channels; ++c)
        {
            for (unsigned int h = 0; h < height; ++h)
            {
                for (unsigned int w = 0; w < width; ++w)
                {
                    float reduction = 0.0;
                    for (unsigned int d = 0; d < channels; ++d)
                    {
                        unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);

                        (*inputDecoder)[inputIndex];
                        const float value = inputDecoder->Get();
                        reduction += value * value;
                    }

                    unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);

                    float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;

                    const float scale = 1.0f / sqrtf(maximum);

                    (*inputDecoder)[index];
                    (*outputEncoder)[index];
                    outputEncoder->Set(inputDecoder->Get() * scale);
                }
            }
        }
    }
}

} //namespace armnn
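For reference, the per-element computation performed by the nested loops above can be summarised as follows. This summary is read off directly from the code: x is the input, y the output, C the number of channels, and \epsilon corresponds to m_Data.m_Parameters.m_Eps, with the sum running over the channel dimension.

y_{n,c,h,w} = \frac{x_{n,c,h,w}}{\sqrt{\max\left(\sum_{d=0}^{C-1} x_{n,d,h,w}^{2},\ \epsilon\right)}}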