aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatteo Martincigh <matteo.martincigh@arm.com>2018-10-18 08:45:39 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-22 16:57:54 +0100
commit8e6f92d2d6b2bb94e6ba9b794c08e465039ea98b (patch)
tree3e4c9e40108152de33c88ad53668bb10416a6a2e
parent8eb675eb77865b5d2491f5b2d650ce993cab738c (diff)
downloadarmnn-8e6f92d2d6b2bb94e6ba9b794c08e465039ea98b.tar.gz
IVGCVSW-2032 + IVGCVSW-2033 Add NHWC implementation to the reference
Normalization workload

* Enabled NHWC support in RefNormalizationFloat32Workload for Across normalization
* Added unit test for the reference implementation

Change-Id: I0e1f319e76491d43b83b121c273fadb5b259d1a0
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                         | 2
-rw-r--r--  src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp  | 41
-rwxr-xr-x  src/backends/test/LayerTests.cpp                                      | 2
-rw-r--r--  src/backends/test/NormTestImpl.hpp                                    | 8
4 files changed, 28 insertions, 25 deletions
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 6cfa4a3926..21371611bb 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -100,8 +100,10 @@ ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2dUint8, AsymmetricNonSquarePooling2dU
ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest)
ARMNN_AUTO_TEST_CASE(ConstantLinearActivationUint8, ConstantLinearActivationUint8Test)
+// Normalization
ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
diff --git a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp b/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
index 5c24416624..4cec02338a 100644
--- a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
@@ -6,6 +6,7 @@
#include "RefNormalizationFloat32Workload.hpp"
#include "RefWorkloadUtils.hpp"
+#include "TensorBufferArrayView.hpp"
#include "Profiling.hpp"
@@ -87,12 +88,22 @@ void NormalizeAcrossUingLbr(const float* inputData,
uint32_t norm_size,
float alpha,
float beta,
- float kappa)
+ float kappa,
+ DataLayout dataLayout)
{
+ TensorBufferArrayView<const float> input(tensorShape,
+ inputData,
+ dataLayout);
+ TensorBufferArrayView<float> output(tensorShape,
+ outputData,
+ dataLayout);
+
+ DataLayoutIndexed dataLayoutIndexed(dataLayout);
+
const unsigned int batchSize = tensorShape[0];
- const unsigned int depth = tensorShape[1];
- const unsigned int rows = tensorShape[2];
- const unsigned int cols = tensorShape[3];
+ const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
+ const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
+ const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
@@ -114,23 +125,15 @@ void NormalizeAcrossUingLbr(const float* inputData,
continue;
}
- float inval = inputData[n * cols * rows * depth +
- boost::numeric_cast<unsigned int>(k) * cols * rows +
- h * cols +
- w];
+ float inval = input.Get(n, boost::numeric_cast<unsigned int>(k), h, w);
- accumulated_scale += inval*inval;
+ accumulated_scale += inval * inval;
}
+
float scale = kappa + (accumulated_scale * alpha);
scale = powf(scale, -beta);
- outputData[n * cols * rows * depth +
- c * cols * rows +
- h * cols +
- w] = scale *
- inputData[n * cols * rows * depth +
- c * cols * rows +
- h * cols +
- w];
+
+ output.Get(n, c, h, w) = scale * input.Get(n, c, h, w);
}
}
}
@@ -146,7 +149,6 @@ void RefNormalizationFloat32Workload::Execute() const
float* outputData = GetOutputTensorDataFloat(0, m_Data);
const float* inputData = GetInputTensorDataFloat(0, m_Data);
-
if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
{
if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
@@ -167,7 +169,8 @@ void RefNormalizationFloat32Workload::Execute() const
m_Data.m_Parameters.m_NormSize,
m_Data.m_Parameters.m_Alpha,
m_Data.m_Parameters.m_Beta,
- m_Data.m_Parameters.m_K);
+ m_Data.m_Parameters.m_K,
+ m_Data.m_Parameters.m_DataLayout);
}
else
{
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 1faacacb5c..726cb7d26b 100755
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -694,7 +694,7 @@ LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFacto
{
auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
- return SimpleNormalizationNhwcClNeonTestImpl(workloadFactory, normChannel, normMethod);
+ return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}
LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
diff --git a/src/backends/test/NormTestImpl.hpp b/src/backends/test/NormTestImpl.hpp
index 300eeceae0..f4e6aea008 100644
--- a/src/backends/test/NormTestImpl.hpp
+++ b/src/backends/test/NormTestImpl.hpp
@@ -152,11 +152,9 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(armnn::IWorkloadFactory& wo
return ret;
}
-// This is test implementation for CL and NEON,
-// as currently, only Across Normalization is supported on CL and NEON for NHWC.
-LayerTestResult<float,4> SimpleNormalizationNhwcClNeonTestImpl(armnn::IWorkloadFactory& workloadFactory,
- armnn::NormalizationAlgorithmChannel normChannel,
- armnn::NormalizationAlgorithmMethod normMethod)
+LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ armnn::NormalizationAlgorithmChannel normChannel,
+ armnn::NormalizationAlgorithmMethod normMethod)
{
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;