aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/RefConstantWorkload.cpp
diff options
context:
space:
mode:
authornarpra01 <narumol.prangnawarat@arm.com>2019-01-23 15:23:11 +0000
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-01-23 17:13:15 +0000
commitdb2b160bf9e7759d0157dfa57ee940290f5170e3 (patch)
tree536fa36ebc9eb8442b96b486a10cadab28d32647 /src/backends/reference/workloads/RefConstantWorkload.cpp
parentc625f000198218fc8d03130ee5658f73b94b2683 (diff)
downloadarmnn-db2b160bf9e7759d0157dfa57ee940290f5170e3.tar.gz
IVGCVSW-2511 Add end to end Gather layer test
* Add end-to-end test for the Gather operator
* Add support for int32 to the Constant layer for the Ref backend
* Add Int32Workload
* Add RefConstantWorkload as a template for float, uint8, and int32
* Remove unused RefBaseConstantWorkload
* Remove unused RefConstantFloat32Workload
* Remove unused RefConstantUint8Workload
* Add support check for int32 in the LayerSupport functions

Change-Id: Ic970588a49ebe2aafb12be8adef52371feacaa7b
Diffstat (limited to 'src/backends/reference/workloads/RefConstantWorkload.cpp')
-rw-r--r--src/backends/reference/workloads/RefConstantWorkload.cpp52
1 file changed, 52 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
new file mode 100644
index 0000000000..e074c6fb04
--- /dev/null
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefConstantWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <boost/assert.hpp>
+
+#include <cstring>
+
+namespace armnn
+{
+
+template <armnn::DataType DataType>
+void RefConstantWorkload<DataType>::Execute() const
+{
+ // Considering the reference backend independently, it could be possible to initialise the intermediate tensor
+ // created by the layer output handler at workload construction time, rather than at workload execution time.
+ // However, this is not an option for other backends (e.g. CL). For consistency, we prefer to align all
+ // implementations.
+ // A similar argument can be made about performing the memory copy in the first place (the layer output handler
+ // could have a non-owning reference to the layer output tensor managed by the const input layer); again, this is
+ // not an option for other backends, and the extra complexity required to make this work for the reference backend
+ // may not be worth the effort (skipping a memory copy in the first inference).
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
+
+ if (!m_RanOnce)
+ {
+ const ConstantQueueDescriptor& data = this->m_Data;
+
+ BOOST_ASSERT(data.m_LayerOutput != nullptr);
+
+ const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+ BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+
+ memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
+ outputInfo.GetNumBytes());
+
+ m_RanOnce = true;
+ }
+}
+
// Explicit instantiations for the data types the reference backend's Constant layer
// supports (float32, asymmetric quantised 8-bit, and signed 32-bit), so the template
// definitions above are emitted in this translation unit.
template class RefConstantWorkload<DataType::Float32>;
template class RefConstantWorkload<DataType::QuantisedAsymm8>;
template class RefConstantWorkload<DataType::Signed32>;
+
+} //namespace armnn