19 using namespace armnn;
// NOTE(review): this is a doxygen-scraped fragment of the Within-channel
// Local Brightness Normalization kernel. The leading numerals ("34", "41",
// ...) are the original file's line numbers fused into the extraction, and
// several lines (braces, the neighbour-coordinate computation for 'i' and
// its bounds checks) are missing. Comments describe the visible logic only.
//
// Layout: NCHW assumed — shape is [batch, channels, rows, cols].
34 const unsigned int batchSize = tensorShape[0];
35 const unsigned int depth = tensorShape[1];
36 const unsigned int rows = tensorShape[2];
37 const unsigned int cols = tensorShape[3];
// Visit every element of the 4-D tensor.
41 for (
unsigned int n = 0; n < batchSize; n++)
43 for (
unsigned int c = 0; c < depth; c++)
45 for (
unsigned int h = 0; h < rows; h++)
47 for (
unsigned int w = 0; w < cols; w++)
// Sum of squares over a (2*radius+1) x (2*radius+1) spatial window
// centred on (h, w), staying within the same channel c.
49 float accumulated_scale = 0.0;
50 for (
int y = -radius; y <= radius; y++)
52 for (
int x = -radius; x <= radius; x++)
// Flat NCHW offset of the neighbour element. 'i' is presumably the
// bounds-checked neighbour column computed on lines elided by the
// extraction — TODO confirm against the full source.
67 unsigned int inputIndex = n * cols * rows * depth +
70 armnn::numeric_cast<unsigned int>(i);
// operator[] repositions the decoder at inputIndex; Get() then reads it.
71 inputData[inputIndex];
72 float inval = inputData.
Get();
74 accumulated_scale += inval*inval;
// Flat NCHW offset of the centre element being normalized.
78 unsigned int index = n * cols * rows * depth +
// LRN equation: out = in / (kappa + alpha * sum_of_squares)^beta.
84 outputData.
Set(inputData.
Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
// NOTE(review): doxygen-scraped fragment of the Across-channel Local
// Brightness Normalization kernel; the embedded numerals are original
// line numbers and several lines (braces, the neighbour-channel 'k'
// computation and its bounds checks) are missing from the extraction.
//
// Uses DataLayoutIndexed to resolve axis positions, so this variant
// supports both NCHW and NHWC layouts.
103 const unsigned int batchSize = tensorShape[0];
104 const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
105 const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
106 const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
// Visit every element of the 4-D tensor.
110 for (
unsigned int n = 0; n < batchSize; n++)
112 for (
unsigned int c = 0; c < depth; c++)
114 for (
unsigned int h = 0; h < rows; h++)
116 for (
unsigned int w = 0; w < cols; w++)
// Sum of squares along the channel axis only: channels
// [c - radius, c + radius] at the same spatial position (h, w).
118 float accumulated_scale = 0.0;
119 for (
int z = -radius; z <= radius; z++)
// 'k' is presumably the bounds-checked neighbour channel computed on
// elided lines — TODO confirm. GetIndex maps (n, k, h, w) to the flat
// offset for the active data layout.
128 unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
130 armnn::numeric_cast<unsigned int>(k),
// Reposition the decoder, then read the neighbour value.
134 inputData[inputIndex];
135 float inval = inputData.
Get();
137 accumulated_scale += inval * inval;
// scale = (kappa + alpha * sum)^(-beta); output = scale * input.
140 float scale = kappa + (accumulated_scale * alpha);
141 scale = powf(scale, -beta);
143 unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);
147 outputData.
Set(scale * inputData.
Get());
// NOTE(review): doxygen-scraped fragment of the workload's Execute body.
// Builds float32 decoder/encoder views over the mapped tensor handles and
// dispatches on the normalization channel algorithm; the surrounding
// switch scaffolding was elided by the extraction.
180 auto inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
// NOTE(review): the encoder is constructed from inputInfo rather than the
// output's TensorInfo — only valid if normalization guarantees identical
// input/output shapes; confirm against the layer's validation code.
181 auto outputEncoder = MakeEncoder<float>(inputInfo, outputs[0]->Map());
// Within-channel (spatial window) variant.
187 NormalizeWithinUingLbr(*inputDecoder,
189 inputInfo.GetShape(),
// Across-channel (depth window) variant.
197 NormalizeAcrossUingLbr(*inputDecoder,
199 inputInfo.GetShape(),
// LocalContrast (LCR) is not implemented: warn and fall through rather
// than fail the inference.
214 ARMNN_LOG(
warning) <<
"Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet.";
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
float m_K
Kappa value used for the across channel normalization equation.
CPU Execution: Reference C++ kernels.
float m_Alpha
Alpha value for the normalization equation.
virtual void Set(IType right)=0
#define ARMNN_LOG(severity)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Copyright (c) 2021 ARM Limited and Contributors.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
virtual IType Get() const =0
std::vector< ITensorHandle * > m_Inputs
RefNormalizationWorkload(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
NormalizationQueueDescriptor m_Data
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Outputs
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
void Execute() const override
Krizhevsky 2012: Local Brightness Normalization.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.