author     Matteo Martincigh <matteo.martincigh@arm.com>        2019-06-03 16:54:25 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-06-04 15:13:51 +0000
commit     3122bd574a3d29774c535ca2136de361da626e88 (patch)
tree       c2fcc19be67f5a35c30d042b80ba3157ef87bd21 /src/backends/reference/workloads/BatchNormImpl.cpp
parent     550fe36f687e73c78b57ebfeee9f98fd35f40f24 (diff)
IVGCVSW-3212 Refactor the Reference BatchNormalization workloads to
handle Float32 and QAsymm8 types

 * Removed the type-specific workload implementations
 * Added a type-independent RefBatchNormalizationWorkload implementation
 * Reworked BatchNormImpl to use decoders/encoders
 * Improved the validation of the BatchNorm queue descriptor
 * Fixed unit tests where necessary

Change-Id: Icf3fa1332292d38ec2fa0b1cb984cab78426034b
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Diffstat (limited to 'src/backends/reference/workloads/BatchNormImpl.cpp')
-rw-r--r--   src/backends/reference/workloads/BatchNormImpl.cpp   82
1 file changed, 82 insertions, 0 deletions
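The commit message above describes reworking BatchNormImpl to read its inputs through Decoder<float> objects and write its output through an Encoder<float>, so that one implementation can serve both Float32 and QAsymm8 tensors. What follows is a minimal sketch of that pattern under simplified, hypothetical interfaces (DecoderSketch, EncoderSketch and QAsymm8DecoderSketch are illustrative names, not armnn's own classes): operator[] positions the object on an element index, and Get()/Set() convert between the stored encoding and float at that position.

#include <cstdint>

// Simplified illustration only -- not the actual armnn interfaces.
template <typename T>
class DecoderSketch
{
public:
    virtual ~DecoderSketch() = default;
    virtual DecoderSketch& operator[](unsigned int index) = 0; // position on an element
    virtual T Get() const = 0;                                 // read, converted to T
};

template <typename T>
class EncoderSketch
{
public:
    virtual ~EncoderSketch() = default;
    virtual EncoderSketch& operator[](unsigned int index) = 0; // position on an element
    virtual void Set(T value) = 0;                             // write, converted from T
};

// Example concrete decoder for QAsymm8 (uint8) data: dequantizes on Get().
class QAsymm8DecoderSketch final : public DecoderSketch<float>
{
public:
    QAsymm8DecoderSketch(const uint8_t* data, float scale, int32_t offset)
        : m_Data(data), m_Scale(scale), m_Offset(offset) {}

    DecoderSketch<float>& operator[](unsigned int index) override
    {
        m_Index = index;
        return *this;
    }

    float Get() const override
    {
        // Asymmetric dequantization: real = scale * (quantized - offset)
        return m_Scale * (static_cast<int32_t>(m_Data[m_Index]) - m_Offset);
    }

private:
    const uint8_t* m_Data;
    float          m_Scale;
    int32_t        m_Offset;
    unsigned int   m_Index = 0;
};

The implementation in the diff below depends only on this position/Get/Set contract, which is what makes it independent of the tensors' underlying data types.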
diff --git a/src/backends/reference/workloads/BatchNormImpl.cpp b/src/backends/reference/workloads/BatchNormImpl.cpp
new file mode 100644
index 0000000000..36e96d3fec
--- /dev/null
+++ b/src/backends/reference/workloads/BatchNormImpl.cpp
@@ -0,0 +1,82 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchNormImpl.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <armnn/Tensor.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+#include <cmath>
+
+namespace armnn
+{
+
+void BatchNormImpl(const BatchNormalizationQueueDescriptor& data,
+ Decoder<float>& meanDecoder,
+ Decoder<float>& varianceDecoder,
+ Decoder<float>& betaDecoder,
+ Decoder<float>& gammaDecoder,
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder)
+{
+ const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]);
+ const TensorShape inputShape = inputInfo.GetShape();
+
+ armnnUtils::DataLayoutIndexed dataLayout(data.m_Parameters.m_DataLayout);
+
+ unsigned int inputBatches = inputShape[0];
+ unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
+ unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+ unsigned int inputChannels = inputShape[dataLayout.GetChannelsIndex()];
+
+ for (unsigned int c = 0; c < inputChannels; c++)
+ {
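+        // operator[] positions each parameter decoder on channel c;
+        // the Get() calls below then read the (dequantized) value at that position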
+ meanDecoder[c];
+ varianceDecoder[c];
+ betaDecoder[c];
+ gammaDecoder[c];
+ float mean = meanDecoder.Get();
+ float var = varianceDecoder.Get();
+ float beta = betaDecoder.Get();
+ float gamma = gammaDecoder.Get();
+
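+        // Fold the normalization into a per-channel scale and shift:
+        // y = gamma * (x - mean) / sqrt(var + eps) + beta  ==  mult * x + add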
+ float mult = gamma / sqrtf(var + data.m_Parameters.m_Eps);
+ float add = beta - mult * mean;
+
+ for (unsigned int n = 0; n < inputBatches; n++)
+ {
+ for (unsigned int h = 0; h < inputHeight; h++)
+ {
+ for (unsigned int w = 0; w < inputWidth; w++)
+ {
+ unsigned int index = 0;
+
+ if (dataLayout == DataLayout::NHWC)
+ {
+ index = n * inputHeight * inputWidth * inputChannels +
+ h * inputWidth * inputChannels +
+ w * inputChannels +
+ c;
+ }
+ else // dataLayout == DataLayout::NCHW
+ {
+ index = n * inputHeight * inputWidth * inputChannels +
+ c * inputHeight * inputWidth +
+ h * inputWidth +
+ w;
+ }
+
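+                    // Position the input decoder and output encoder on this element,
+                    // then read, apply the scale/shift and write the result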
+ inputDecoder[index];
+ outputEncoder[index];
+ outputEncoder.Set(mult * inputDecoder.Get() + add);
+ }
+ }
+ }
+ }
+}
+
+} // namespace armnn
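For reference, the per-channel mult and add values computed above are the usual algebraic folding of batch normalization into a single scale and shift. A minimal, self-contained sketch of the same arithmetic on plain float buffers (BatchNormChannelSketch is a hypothetical name, not part of armnn):

#include <cmath>
#include <cstddef>

// Applies batch normalization to one channel's values using the same folding
// as BatchNormImpl: y = gamma * (x - mean) / sqrt(var + eps) + beta == mult * x + add
void BatchNormChannelSketch(const float* input, float* output, std::size_t count,
                            float mean, float variance, float beta, float gamma, float eps)
{
    const float mult = gamma / std::sqrt(variance + eps);
    const float add  = beta - mult * mean;

    for (std::size_t i = 0; i < count; ++i)
    {
        output[i] = mult * input[i] + add;
    }
}

Precomputing mult and add once per channel reduces the inner batch/height/width loops to a single multiply and add per element, which is why BatchNormImpl hoists them out of the spatial loops.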