// NOTE(review): this span is an extraction of what appears to be
// RefL2NormalizationWorkload::Execute() (Arm NN reference backend).
// The embedded numbers 45..103 are the ORIGINAL file's line numbers and
// are not contiguous — braces, the ternaries' fallback branches, and the
// eps-clamped `maximum` computation (between original lines 95 and 99)
// are missing from this view. Comments below describe only what is
// visible; recover the dropped lines from the full file before editing.

// Wrap the mapped input/output tensor handles in float decoder/encoder
// objects so element access is uniform regardless of the backing type.
45 auto inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
46 auto outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

// Build a rank-4 "padded" shape (batch, plus C/H/W placed per data
// layout). `idxShift` presumably accounts for input tensors of rank < 4,
// with missing leading dimensions treated as size 1 — TODO confirm
// against the full file (the `: 1` fallbacks are on lines not shown).
51 unsigned int paddedShapeArray[4];
54 const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
55 paddedShapeArray[0] = batches;

// Channels: take the real dimension when it exists after the shift;
// `channelsIdx` comes from the DataLayout (NCHW vs NHWC) indexer.
58 const unsigned int channels = (channelsIdx - idxShift >= 0)
59 ? shape[armnn::numeric_cast<unsigned int>(channelsIdx - idxShift)]
61 paddedShapeArray[channelsIdx] = channels;

// Height: same pattern as channels.
64 const unsigned int height = (heightIdx - idxShift >= 0)
65 ? shape[armnn::numeric_cast<unsigned int>(heightIdx - idxShift)]
67 paddedShapeArray[heightIdx] = height;

// Width: same pattern as channels.
70 const unsigned int width = (widthIdx - idxShift >= 0)
71 ? shape[armnn::numeric_cast<unsigned int>(widthIdx - idxShift)]
73 paddedShapeArray[widthIdx] = width;

// For every output element (n, c, h, w), L2-normalize across the
// channel axis: divide the input element by the L2 norm of the channel
// vector at (n, h, w). Note this recomputes the per-(n,h,w) channel-sum
// inside the `c` loop, i.e. `channels` times — reference (clarity-first)
// implementation, not performance-tuned.
77 for (
unsigned int n = 0; n < batches; ++n)
79 for (
unsigned int c = 0; c < channels; ++c)
81 for (
unsigned int h = 0; h < height; ++h)
83 for (
unsigned int w = 0; w < width; ++w)
// Sum of squares over all channels d at this (n, h, w) position.
85 float reduction = 0.0;
86 for (
unsigned int d = 0; d < channels; ++d)
// GetIndex maps logical NCHW coordinates onto the layout's
// physical (possibly NHWC) flat index.
88 unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
// Seek the decoder to the element, then read it.
90 (*inputDecoder)[inputIndex];
91 const float value = inputDecoder->Get();
92 reduction += value * value;

// Flat index of the element currently being normalized.
95 unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
// `maximum` is defined on lines missing from this extraction —
// presumably max(reduction, m_Eps) to avoid division by zero
// (cf. the `m_Eps` member documented elsewhere in this file);
// TODO confirm. scale = 1 / ||x||_2 (eps-clamped).
99 const float scale = 1.0f / sqrtf(maximum);
// Re-seek decoder/encoder to this element and write input * scale.
101 (*inputDecoder)[index];
102 (*outputEncoder)[index];
103 outputEncoder->Set(inputDecoder->Get() * scale);
float m_Eps
Used to avoid dividing by zero.
CPU Execution: Reference C++ kernels.
void ExecuteAsync(ExecutionData &executionData) override
Copyright (c) 2021 ARM Limited and Contributors.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< ITensorHandle * > m_Inputs
L2NormalizationQueueDescriptor m_Data
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
void Execute() const override
std::vector< ITensorHandle * > m_Outputs
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
RefL2NormalizationWorkload(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers