From 2fc70c5f7bebd95da7c718907011c92fd29e3603 Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Wed, 5 Jun 2019 14:12:48 +0100
Subject: IVGCVSW-3226 Refactor the reference normalization workload

* Refactored RefNormalizationFloat32Workload into RefNormalizationWorkload
* Added ref support of Uint8 norm workloads
* Added workload unit tests for Uint8

Change-Id: I063ce919c267e02a32e739848e49d75fd98a5eb6
Signed-off-by: Matteo Martincigh
---
 .../workloads/RefNormalizationWorkload.cpp | 210 +++++++++++++++++++++
 1 file changed, 210 insertions(+)
 create mode 100644 src/backends/reference/workloads/RefNormalizationWorkload.cpp

diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
new file mode 100644
index 0000000000..8ff2d9cf92
--- /dev/null
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -0,0 +1,210 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefNormalizationWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+#include <armnn/Tensor.hpp>
+
+#include <DataLayoutIndexed.hpp>
+#include <Profiling.hpp>
+
+#include <boost/log/trivial.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+using namespace armnn;
+using namespace armnnUtils;
+
+namespace
+{
+
+// Helper function to compute "Within" normalization using Krichevsky 2012: Local Brightness Normalization.
+void NormalizeWithinUingLbr(Decoder<float>& inputData,
+                            Encoder<float>& outputData,
+                            const TensorShape& tensorShape,
+                            uint32_t norm_size,
+                            float alpha,
+                            float beta,
+                            float kappa)
+{
+    const unsigned int batchSize = tensorShape[0];
+    const unsigned int depth = tensorShape[1];
+    const unsigned int rows = tensorShape[2];
+    const unsigned int cols = tensorShape[3];
+
+    int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+
+    for (unsigned int n = 0; n < batchSize; n++)
+    {
+        for (unsigned int c = 0; c < depth; c++)
+        {
+            for (unsigned int h = 0; h < rows; h++)
+            {
+                for (unsigned int w = 0; w < cols; w++)
+                {
+                    float accumulated_scale = 0.0;
+                    for (int y = -radius; y <= radius; y++)
+                    {
+                        for (int x = -radius; x <= radius; x++)
+                        {
+                            int i = boost::numeric_cast<int>(w) + x;
+                            int j = boost::numeric_cast<int>(h) + y;
+
+                            if ((i < 0) || (i >= boost::numeric_cast<int>(cols)))
+                            {
+                                continue;
+                            }
+
+                            if ((j < 0) || (j >= boost::numeric_cast<int>(rows)))
+                            {
+                                continue;
+                            }
+
+                            unsigned int inputIndex = n * cols * rows * depth +
+                                                      c * cols * rows +
+                                                      boost::numeric_cast<unsigned int>(j) * cols +
+                                                      boost::numeric_cast<unsigned int>(i);
+                            inputData[inputIndex];
+                            float inval = inputData.Get();
+
+                            accumulated_scale += inval*inval;
+                        }
+                    }
+
+                    unsigned int index = n * cols * rows * depth +
+                                         c * cols * rows +
+                                         h * cols +
+                                         w;
+                    inputData[index];
+                    outputData[index];
+                    outputData.Set(inputData.Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
+                }
+            }
+        }
+    }
+}
+
+// Helper function to compute "Across" normalization using Krichevsky 2012: Local Brightness Normalization.
+void NormalizeAcrossUingLbr(Decoder<float>& inputData,
+                            Encoder<float>& outputData,
+                            const TensorShape& tensorShape,
+                            uint32_t norm_size,
+                            float alpha,
+                            float beta,
+                            float kappa,
+                            DataLayout dataLayout)
+{
+    DataLayoutIndexed dataLayoutIndexed(dataLayout);
+
+    const unsigned int batchSize = tensorShape[0];
+    const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
+    const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
+    const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
+
+    int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+
+    for (unsigned int n = 0; n < batchSize; n++)
+    {
+        for (unsigned int c = 0; c < depth; c++)
+        {
+            for (unsigned int h = 0; h < rows; h++)
+            {
+                for (unsigned int w = 0; w < cols; w++)
+                {
+                    float accumulated_scale = 0.0;
+                    for (int z = -radius; z <= radius; z++)
+                    {
+                        int k = boost::numeric_cast<int>(c) + z;
+
+                        if ((k < 0) || (k >= boost::numeric_cast<int>(depth)))
+                        {
+                            continue;
+                        }
+
+                        unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
+                                                                         n,
+                                                                         boost::numeric_cast<unsigned int>(k),
+                                                                         h,
+                                                                         w);
+
+                        inputData[inputIndex];
+                        float inval = inputData.Get();
+
+                        accumulated_scale += inval * inval;
+                    }
+
+                    float scale = kappa + (accumulated_scale * alpha);
+                    scale = powf(scale, -beta);
+
+                    unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);
+
+                    inputData[index];
+                    outputData[index];
+                    outputData.Set(scale * inputData.Get());
+                }
+            }
+        }
+    }
+}
+
+} // Anonymous namespace
+
+namespace armnn
+{
+
+RefNormalizationWorkload::RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info)
+    : BaseWorkload(descriptor, info)
+{}
+
+void RefNormalizationWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    auto inputDecoder  = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    auto outputEncoder = MakeEncoder<float>(inputInfo, m_Data.m_Outputs[0]->Map());
+
+    if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
+    {
+        if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
+        {
+            NormalizeWithinUingLbr(*inputDecoder,
+                                   *outputEncoder,
+                                   inputInfo.GetShape(),
+                                   m_Data.m_Parameters.m_NormSize,
+                                   m_Data.m_Parameters.m_Alpha,
+                                   m_Data.m_Parameters.m_Beta,
+                                   m_Data.m_Parameters.m_K);
+        }
+        else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
+        {
+            NormalizeAcrossUingLbr(*inputDecoder,
+                                   *outputEncoder,
+                                   inputInfo.GetShape(),
+                                   m_Data.m_Parameters.m_NormSize,
+                                   m_Data.m_Parameters.m_Alpha,
+                                   m_Data.m_Parameters.m_Beta,
+                                   m_Data.m_Parameters.m_K,
+                                   m_Data.m_Parameters.m_DataLayout);
+        }
+        else
+        {
+            BOOST_LOG_TRIVIAL(warning) << "Illegal NORMALIZATION mode in normalization_f32";
+            return;
+        }
+    }
+    else
+    {
+        BOOST_LOG_TRIVIAL(warning) << "Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet.";
+        return;
+    }
+}
+
+} // namespace armnn
-- 
cgit v1.2.1
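
For readers unfamiliar with the algorithm: both helpers implement the local response
normalization from Krichevsky 2012, scaling each element by
(kappa + alpha * sum of squared neighbours)^(-beta), where the neighbourhood is either a
spatial window ("Within") or a run of adjacent channels ("Across"). Below is a minimal
standalone sketch of the "Across" case on a plain NCHW float buffer, independent of
Arm NN's Decoder/Encoder machinery; the function name LrnAcrossChannels, the std::vector
interface and the indexing lambda are illustrative choices and are not part of the patch
or of the Arm NN API.

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Standalone sketch of "Across" local response normalization on an NCHW float buffer:
//   out[n,c,h,w] = in[n,c,h,w] * (kappa + alpha * sumSq)^(-beta)
// where sumSq is the sum of squares over normSize channels centred on c, clipped at the
// channel boundaries (mirroring the radius/continue logic in NormalizeAcrossUingLbr).
std::vector<float> LrnAcrossChannels(const std::vector<float>& input,
                                     unsigned int batch,
                                     unsigned int channels,
                                     unsigned int height,
                                     unsigned int width,
                                     std::uint32_t normSize,
                                     float alpha,
                                     float beta,
                                     float kappa)
{
    std::vector<float> output(input.size());
    const int radius = static_cast<int>(normSize / 2u); // same rounding assumption as the workload

    // Flatten an NCHW coordinate into a linear offset.
    auto index = [=](unsigned int n, unsigned int c, unsigned int h, unsigned int w)
    {
        return ((static_cast<std::size_t>(n) * channels + c) * height + h) * width + w;
    };

    for (unsigned int n = 0; n < batch; n++)
    {
        for (unsigned int c = 0; c < channels; c++)
        {
            for (unsigned int h = 0; h < height; h++)
            {
                for (unsigned int w = 0; w < width; w++)
                {
                    float sumSq = 0.0f;
                    for (int z = -radius; z <= radius; z++)
                    {
                        const int k = static_cast<int>(c) + z;
                        if (k < 0 || k >= static_cast<int>(channels))
                        {
                            continue; // the window is clipped at the edges, not padded
                        }
                        const float v = input[index(n, static_cast<unsigned int>(k), h, w)];
                        sumSq += v * v;
                    }
                    const std::size_t i = index(n, c, h, w);
                    output[i] = input[i] * std::pow(kappa + alpha * sumSq, -beta);
                }
            }
        }
    }
    return output;
}

The workload itself never special-cases Uint8: MakeDecoder<float>/MakeEncoder<float> wrap the
mapped tensor memory and, for quantized tensors, dequantize on Get() and requantize on Set(),
which appears to be how this commit adds the Uint8 support mentioned in the commit message
without touching the float arithmetic in the two helpers.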