author     Finn Williams <Finn.Williams@arm.com>    2021-04-07 10:23:21 +0100
committer  Finn Williams <Finn.Williams@arm.com>    2021-04-14 15:18:38 +0100
commit     b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0 (patch)
tree       04cc91a6efb7e2601f80e4213a747938165b7184 /src/backends/reference/workloads/RefQuantizeWorkload.cpp
parent     b898222a8856475f0217be5e78b4816aa1914f15 (diff)
download   armnn-b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0.tar.gz
IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload
* Added multithreaded StridedSliceEndToEndTest

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
Diffstat (limited to 'src/backends/reference/workloads/RefQuantizeWorkload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefQuantizeWorkload.cpp  19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index 2eef5f33db..35791e65fb 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -34,21 +34,22 @@ RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descript
 {
 }
 
-void RefQuantizeWorkload::PostAllocationConfigure()
+void RefQuantizeWorkload::Execute() const
 {
-    const TensorInfo& inputInfo = armnn::GetTensorInfo(m_Data.m_Inputs[0]);
-    m_InputDecoder = MakeDecoder<float>(inputInfo);
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
 
-    const TensorInfo& outputInfo = armnn::GetTensorInfo(m_Data.m_Outputs[0]);
-    m_OutputEncoder = MakeEncoder<float>(outputInfo);
+void RefQuantizeWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
 }
 
-void RefQuantizeWorkload::Execute() const
+void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
-    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
+    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
 
-    QuantizeImpl(*m_InputDecoder, *m_OutputEncoder, m_NumElements);
+    QuantizeImpl(*inputDecoder, *outputEncoder, m_NumElements);
 }
 
 } //namespace armnn
\ No newline at end of file
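
Note on the pattern in this change: the old stateful Execute() (which relied on the decoder/encoder cached by PostAllocationConfigure()) is split into a synchronous Execute(), an ExecuteAsync() taking a WorkingMemDescriptor, and a shared Execute(inputs, outputs) overload that builds its decoder and encoder locally from whichever tensor handles it is given, so no state is shared between calls. The sketch below illustrates only that split; it is not the real Arm NN implementation. The ITensorHandle and WorkingMemDescriptor definitions are simplified stand-ins for the Arm NN types of the same name, and ExampleWorkload is hypothetical.

#include <utility>
#include <vector>

// Simplified stand-in: the real Arm NN ITensorHandle has a richer interface.
struct ITensorHandle
{
    virtual ~ITensorHandle() = default;
    virtual void* Map() const = 0;
};

// Simplified stand-in: per-call working memory handed to the async path.
struct WorkingMemDescriptor
{
    std::vector<ITensorHandle*> m_Inputs;
    std::vector<ITensorHandle*> m_Outputs;
};

// Hypothetical workload keeping only the shape of the commit's pattern:
// both entry points forward to one overload that derives everything it
// needs from its arguments instead of cached members.
class ExampleWorkload
{
public:
    ExampleWorkload(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs)
        : m_Inputs(std::move(inputs)), m_Outputs(std::move(outputs)) {}

    // Synchronous path: operates on the tensors captured at construction time.
    void Execute() const
    {
        Execute(m_Inputs, m_Outputs);
    }

    // Asynchronous path: operates on caller-supplied working memory, so several
    // threads can run the same workload concurrently with different descriptors.
    void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
    {
        Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
    }

private:
    // Shared implementation: the mapped buffers are obtained locally from the
    // handles passed in, mirroring the local decoder/encoder in the diff above.
    void Execute(const std::vector<ITensorHandle*>& inputs,
                 const std::vector<ITensorHandle*>& outputs) const
    {
        void* inputData  = inputs[0]->Map();
        void* outputData = outputs[0]->Map();
        // ... quantize inputData into outputData here ...
        (void)inputData;
        (void)outputData;
    }

    std::vector<ITensorHandle*> m_Inputs;
    std::vector<ITensorHandle*> m_Outputs;
};

With this shape, one thread can call Execute() on the workload's own tensors while other threads call ExecuteAsync() with their own WorkingMemDescriptor, which is presumably the kind of concurrent use the multithreaded StridedSliceEndToEndTest mentioned in the commit message exercises.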