path: root/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
author    Matteo Martincigh <matteo.martincigh@arm.com>    2019-06-03 16:54:25 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2019-06-04 15:13:51 +0000
commit    3122bd574a3d29774c535ca2136de361da626e88 (patch)
tree      c2fcc19be67f5a35c30d042b80ba3157ef87bd21 /src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
parent    550fe36f687e73c78b57ebfeee9f98fd35f40f24 (diff)
download  armnn-3122bd574a3d29774c535ca2136de361da626e88.tar.gz
IVGCVSW-3212 Refactor the Reference BatchNormalization workloads to
handle Float32 and QAsymm8 types

 * Removed the type-specific workload implementations
 * Added a type-independent RefBatchNormalizationWorkload implementation
 * Reworked BatchNormImpl to use decoders/encoders
 * Improved the validation of the BatchNorm queue descriptor
 * Fixed unit tests where necessary

Change-Id: Icf3fa1332292d38ec2fa0b1cb984cab78426034b
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
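The key idea behind the refactor is that per-type element access is hidden behind a float-valued decoder/encoder interface, so a single workload implementation can serve both Float32 and QAsymm8 tensors. The following is a minimal, self-contained sketch of that idea only; FloatDecoder, Float32Decoder and QAsymm8Decoder are illustrative names and not the actual armnn classes (the real workload uses Decoder<float> and Encoder<float> obtained via MakeDecoder/MakeEncoder, as seen in the diff below).

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Reads elements of any underlying data type as float.
struct FloatDecoder
{
    virtual ~FloatDecoder() = default;
    virtual float Get(size_t index) const = 0;
};

// Decoder for raw float data: returns the value unchanged.
struct Float32Decoder : FloatDecoder
{
    explicit Float32Decoder(const float* data) : m_Data(data) {}
    float Get(size_t index) const override { return m_Data[index]; }
    const float* m_Data;
};

// Decoder for 8-bit asymmetrically quantized data: dequantizes on the fly.
struct QAsymm8Decoder : FloatDecoder
{
    QAsymm8Decoder(const uint8_t* data, float scale, int32_t offset)
        : m_Data(data), m_Scale(scale), m_Offset(offset) {}
    float Get(size_t index) const override
    {
        return m_Scale * (static_cast<int32_t>(m_Data[index]) - m_Offset);
    }
    const uint8_t* m_Data;
    float          m_Scale;
    int32_t        m_Offset;
};

int main()
{
    std::vector<float>   floatData = { 1.0f, 2.0f, 3.0f };
    std::vector<uint8_t> quantData = { 130, 140, 150 }; // scale 0.1, offset 128

    std::unique_ptr<FloatDecoder> f = std::make_unique<Float32Decoder>(floatData.data());
    std::unique_ptr<FloatDecoder> q = std::make_unique<QAsymm8Decoder>(quantData.data(), 0.1f, 128);

    // The same loop works for both tensors: the decoder hides the data type.
    for (size_t i = 0; i < 3; ++i)
    {
        std::cout << f->Get(i) << " " << q->Get(i) << "\n";
    }
    return 0;
}

With this pattern, adding support for another data type means adding another decoder/encoder, not another workload, which is what removes the type-specific workload classes mentioned in the commit message.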
Diffstat (limited to 'src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp  |  45
1 file changed, 45 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
new file mode 100644
index 0000000000..b43b104459
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefBatchNormalizationWorkload.hpp"
+
+#include "BatchNormImpl.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+RefBatchNormalizationWorkload::RefBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload(descriptor, info)
+ , m_Mean (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Mean)))
+ , m_Variance(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Variance)))
+ , m_Beta (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Beta)))
+ , m_Gamma (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Gamma)))
+{}
+
+void RefBatchNormalizationWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationWorkload_Execute");
+
+ std::unique_ptr<Decoder<float>> meanDecoder = MakeDecoder<float>(GetTensorInfo(m_Mean.get()),
+ m_Mean.get()->Map(true));
+ std::unique_ptr<Decoder<float>> varianceDecoder = MakeDecoder<float>(GetTensorInfo(m_Variance.get()),
+ m_Variance.get()->Map(true));
+ std::unique_ptr<Decoder<float>> gammaDecoder = MakeDecoder<float>(GetTensorInfo(m_Gamma.get()),
+ m_Gamma.get()->Map(true));
+ std::unique_ptr<Decoder<float>> betaDecoder = MakeDecoder<float>(GetTensorInfo(m_Beta.get()),
+ m_Beta.get()->Map(true));
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[0]),
+ m_Data.m_Inputs[0]->Map());
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]),
+ m_Data.m_Outputs[0]->Map());
+
+ BatchNormImpl(m_Data, *meanDecoder, *varianceDecoder, *betaDecoder, *gammaDecoder, *inputDecoder, *outputEncoder);
+}
+
+} //namespace armnn
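BatchNormImpl itself is not part of this file, so the following is only an illustrative sketch of how a batch-normalization loop can be written purely against float readers/writers, assuming NCHW layout and the standard formula out = gamma * (x - mean) / sqrt(variance + epsilon) + beta. The function name, signature and std::function-based accessors are hypothetical stand-ins for the Decoder<float>/Encoder<float> objects created in Execute(), not the actual armnn implementation.

#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

// Per-channel batch normalization written against type-independent accessors:
// out = gamma * (x - mean) / sqrt(variance + epsilon) + beta
void BatchNormSketch(size_t batches, size_t channels, size_t height, size_t width,
                     float epsilon,
                     const std::function<float(size_t)>& input,          // decoder-like reader
                     const std::vector<float>& mean,
                     const std::vector<float>& variance,
                     const std::vector<float>& beta,
                     const std::vector<float>& gamma,
                     const std::function<void(size_t, float)>& output)   // encoder-like writer
{
    for (size_t n = 0; n < batches; ++n)
    {
        for (size_t c = 0; c < channels; ++c)
        {
            const float invStdDev = 1.0f / std::sqrt(variance[c] + epsilon);
            for (size_t h = 0; h < height; ++h)
            {
                for (size_t w = 0; w < width; ++w)
                {
                    // NCHW flat index, assumed for this example only.
                    const size_t index = ((n * channels + c) * height + h) * width + w;
                    const float normalized = (input(index) - mean[c]) * invStdDev;
                    output(index, gamma[c] * normalized + beta[c]);
                }
            }
        }
    }
}

In the real workload, the reader and writer roles are played by the decoder and encoder objects constructed in Execute(), which present Float32 and QAsymm8 data behind the same float interface.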