path: root/src/backends/reference/workloads/RefConstantWorkload.cpp
author    Nina Drozd <nina.drozd@arm.com>    2019-05-16 12:09:18 +0100
committer Nina Drozd <nina.drozd@arm.com>    2019-05-20 08:56:46 +0000
commit    58ef2c6f797f6bdb962016c519ebbc980ec2ed50 (patch)
tree      282d50763bd2a6aaf4bb4ef5ea5ed123cfdcedb6 /src/backends/reference/workloads/RefConstantWorkload.cpp
parent    39d487d3e2834a6d3024c92a519d84aa05f87925 (diff)
download  armnn-58ef2c6f797f6bdb962016c519ebbc980ec2ed50.tar.gz
IVGCVSW-2967 Support QSymm16 for Constant workloads
* Validate that output is any of supported types in WorkloadData
* Validate that output is any of supported types in RefLayerSupport
* Add test for constant with QuantisedSymm16 in LayerTests
* Add test for creating constant workload in RefCreateWorkloadTests
* Add test for constant with QuantisedSymm16 in RefLayerTests
* Refactor RefConstantWorkload - BaseWorkload instead of TypedWorkload
* Refactor RefConstantWorkload - remove m_RanOnce, use PostAllocationConfigure()

Signed-off-by: Nina Drozd <nina.drozd@arm.com>
Change-Id: Ic30e61319ef4ff9c367689901f7c6d498142a9c5
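For context, the declaration side of this refactor is not part of this diff. A minimal sketch of the new class shape, inferred from the .cpp changes below (the actual RefConstantWorkload.hpp may differ in detail):

// Sketch of the refactored declaration, inferred from the .cpp side of this
// commit; not taken verbatim from RefConstantWorkload.hpp.
class RefConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
public:
    RefConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info);

    // Replaces the old m_RanOnce guard: the copy from the constant layer
    // output into the workload output now happens once, after allocation.
    void PostAllocationConfigure() override;

    void Execute() const override;
};

Dropping the template parameter also removes the need for explicit instantiations per DataType, which is what allows QSymm16 to be supported without adding another template instantiation.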
Diffstat (limited to 'src/backends/reference/workloads/RefConstantWorkload.cpp')
-rw-r--r--    src/backends/reference/workloads/RefConstantWorkload.cpp    41
1 file changed, 15 insertions(+), 26 deletions(-)
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index e074c6fb04..3506198410 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -16,37 +16,26 @@
namespace armnn
{
-template <armnn::DataType DataType>
-void RefConstantWorkload<DataType>::Execute() const
-{
- // Considering the reference backend independently, it could be possible to initialise the intermediate tensor
- // created by the layer output handler at workload construction time, rather than at workload execution time.
- // However, this is not an option for other backends (e.g. CL). For consistency, we prefer to align all
- // implementations.
- // A similar argument can be made about performing the memory copy in the first place (the layer output handler
- // could have a non-owning reference to the layer output tensor managed by the const input layer); again, this is
- // not an option for other backends, and the extra complexity required to make this work for the reference backend
- // may not be worth the effort (skipping a memory copy in the first inference).
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
+RefConstantWorkload::RefConstantWorkload(
+ const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ConstantQueueDescriptor>(descriptor, info) {}
- if (!m_RanOnce)
- {
- const ConstantQueueDescriptor& data = this->m_Data;
-
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+void RefConstantWorkload::PostAllocationConfigure()
+{
+ const ConstantQueueDescriptor& data = this->m_Data;
- const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
- BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+ BOOST_ASSERT(data.m_LayerOutput != nullptr);
- memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
- outputInfo.GetNumBytes());
+ const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+ BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
- m_RanOnce = true;
- }
+ memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
+ outputInfo.GetNumBytes());
}
-template class RefConstantWorkload<DataType::Float32>;
-template class RefConstantWorkload<DataType::QuantisedAsymm8>;
-template class RefConstantWorkload<DataType::Signed32>;
+void RefConstantWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
+}
} //namespace armnn
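The practical effect of the refactor is that the one-time copy moves out of the per-inference path. An illustrative driver loop, not taken from this commit (in Arm NN the runtime's loaded-network code plays this role; the names below are hypothetical):

// The runtime calls PostAllocationConfigure() once, after output tensors
// are allocated, then Execute() on every inference. The constant data is
// copied exactly once, with no m_RanOnce check left in the hot path.
workload->PostAllocationConfigure(); // one-time memcpy of the constant tensor
for (unsigned int i = 0; i < numInferences; ++i)
{
    workload->Execute(); // now only records the CpuRef profiling event
}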