14 #include <boost/numeric/conversion/cast.hpp> 20 using namespace armnn;
35 const unsigned int batchSize = tensorShape[0];
36 const unsigned int depth = tensorShape[1];
37 const unsigned int rows = tensorShape[2];
38 const unsigned int cols = tensorShape[3];
42 for (
unsigned int n = 0; n < batchSize; n++)
44 for (
unsigned int c = 0; c < depth; c++)
46 for (
unsigned int h = 0; h < rows; h++)
48 for (
unsigned int w = 0; w < cols; w++)
50 float accumulated_scale = 0.0;
51 for (
int y = -radius; y <= radius; y++)
53 for (
int x = -radius; x <= radius; x++)
68 unsigned int inputIndex = n * cols * rows * depth +
71 boost::numeric_cast<unsigned int>(i);
72 inputData[inputIndex];
73 float inval = inputData.
Get();
75 accumulated_scale += inval*inval;
79 unsigned int index = n * cols * rows * depth +
85 outputData.
Set(inputData.
Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
104 const unsigned int batchSize = tensorShape[0];
105 const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
106 const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
107 const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
111 for (
unsigned int n = 0; n < batchSize; n++)
113 for (
unsigned int c = 0; c < depth; c++)
115 for (
unsigned int h = 0; h < rows; h++)
117 for (
unsigned int w = 0; w < cols; w++)
119 float accumulated_scale = 0.0;
120 for (
int z = -radius; z <= radius; z++)
129 unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
131 boost::numeric_cast<unsigned int>(k),
135 inputData[inputIndex];
136 float inval = inputData.
Get();
138 accumulated_scale += inval * inval;
141 float scale = kappa + (accumulated_scale * alpha);
142 scale = powf(scale, -beta);
144 unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);
148 outputData.
Set(scale * inputData.
Get());
171 auto inputDecoder = MakeDecoder<float>(inputInfo,
m_Data.
m_Inputs[0]->Map());
172 auto outputEncoder = MakeEncoder<float>(inputInfo,
m_Data.
m_Outputs[0]->Map());
178 NormalizeWithinUingLbr(*inputDecoder,
180 inputInfo.GetShape(),
188 NormalizeAcrossUingLbr(*inputDecoder,
190 inputInfo.GetShape(),
205 ARMNN_LOG(
warning) <<
"Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet.";
float m_K
Kappa value used for the across channel normalization equation.
CPU Execution: Reference C++ kernels.
float m_Alpha
Alpha value for the normalization equation.
const NormalizationQueueDescriptor m_Data
virtual void Set(IType right)=0
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
#define ARMNN_LOG(severity)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Copyright (c) 2020 ARM Limited.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
virtual IType Get() const =0
RefNormalizationWorkload(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
Provides access to the appropriate indexes for Channels, Height and Width based on the DataLayout (NCHW or NHWC).
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
virtual void Execute() const override
Krichevsky 2012: Local Brightness Normalization.
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.