path: root/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
author    Finn Williams <Finn.Williams@arm.com>  2021-04-07 10:23:21 +0100
committer Finn Williams <Finn.Williams@arm.com>  2021-04-14 15:18:38 +0100
commit    b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0 (patch)
tree      04cc91a6efb7e2601f80e4213a747938165b7184 /src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
parent    b898222a8856475f0217be5e78b4816aa1914f15 (diff)
download  armnn-b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0.tar.gz
IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload
* Added multithreaded StridedSliceEndToEndTest

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
Diffstat (limited to 'src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp  17
1 file changed, 14 insertions(+), 3 deletions(-)
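
The pattern in the diff below, applied across the reference workloads in this change, is that the public Execute() and the new ExecuteAsync() both forward to a shared Execute(inputs, outputs) overload. A minimal sketch of the header declarations this implies; the actual RefFakeQuantizationFloat32Workload.hpp is not part of this diff, so the details here are an assumption:

// Sketch only: declarations implied by the .cpp diff below, not the
// actual RefFakeQuantizationFloat32Workload.hpp from this commit.
class RefFakeQuantizationFloat32Workload : public Float32Workload<FakeQuantizationQueueDescriptor>
{
public:
    using Float32Workload<FakeQuantizationQueueDescriptor>::Float32Workload;
    void Execute() const override;
    void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;

private:
    // Shared body: operates on whichever tensor handles it is handed,
    // either the workload's own m_Data handles (Execute) or per-thread
    // handles supplied via a WorkingMemDescriptor (ExecuteAsync).
    void Execute(std::vector<ITensorHandle*> inputs,
                 std::vector<ITensorHandle*> outputs) const;
};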
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
index cf355d35d2..b30811b8ed 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
@@ -28,12 +28,23 @@ void FakeQuantization(const float* inputData, float* outputData, uint32_t numEle
 void RefFakeQuantizationFloat32Workload::Execute() const
 {
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefFakeQuantizationFloat32Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs,
+                                                 std::vector<ITensorHandle*> outputs) const
+{
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");
 
-    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
 
-    const float* inputData = GetInputTensorDataFloat(0, m_Data);
-    float* outputData = GetOutputTensorDataFloat(0, m_Data);
+    const float* inputData = reinterpret_cast<const float*>(inputs[0]->Map());
+    float* outputData = reinterpret_cast<float*>(outputs[0]->Map());
 
     FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                      m_Data.m_Parameters.m_Min,
                      m_Data.m_Parameters.m_Max);
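
The point of the ExecuteAsync() path is that tensor handles travel with the call instead of living on the workload, so several threads can run one workload instance against separate buffers; the commit message's multithreaded StridedSliceEndToEndTest exercises this pattern. A minimal caller sketch, assuming a WorkingMemDescriptor carrying m_Inputs/m_Outputs vectors of ITensorHandle* as used above; the construction of the two descriptors is omitted, and the function name is hypothetical:

#include <thread>

// Illustrative only: each thread gets its own WorkingMemDescriptor, so the
// shared workload object never maps the same tensor handles from two threads.
void RunConcurrently(RefFakeQuantizationFloat32Workload& workload,
                     WorkingMemDescriptor& threadAMem,
                     WorkingMemDescriptor& threadBMem)
{
    std::thread t1([&] { workload.ExecuteAsync(threadAMem); });
    std::thread t2([&] { workload.ExecuteAsync(threadBMem); });
    t1.join();
    t2.join();
}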