path: root/src/backends/reference/workloads
author    Jan Eilers <jan.eilers@arm.com>  2019-11-01 11:09:36 +0000
committer Jan Eilers <jan.eilers@arm.com>  2019-11-04 12:09:08 +0000
commit    f71079328ae72a65c91e410b2bd35eabb67cb6d1 (patch)
tree      e5460c94ea84f0ffb6ec09df820912cd9bd750ec /src/backends/reference/workloads
parent    7ff9a6096e3c1facbd6786993a6437b9f72069d2 (diff)
Add fp16 support for dequantize
* Changed RefDequantizeWorkload to use Encoder/Decoder
* Added related unit tests for Cl, Neon and Ref

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ic2fd4103090dd2127c6859b49305736f7b2dfb05
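For context on the commit message above: the Encoder/Decoder pattern replaces per-type switch statements with per-element adapters, so a single loop can serve any input/output type combination. A minimal standalone sketch of the idea (illustrative names only, not ArmNN code), assuming the usual asymmetric quantization rule value = scale * (quantized - offset):

// decoder_encoder_sketch.cpp -- standalone illustration, not ArmNN code.
// A Decoder reads one quantized element and yields fp32; an Encoder takes
// fp32 and writes it out in the target type. The loop needs no switch.
#include <cstdint>
#include <cstddef>
#include <iostream>
#include <vector>

struct QAsymm8Decoder                       // QAsymm8 -> fp32
{
    const uint8_t* data;
    float scale;
    int32_t offset;
    float Get() const { return scale * (static_cast<int32_t>(*data) - offset); }
    void operator++() { ++data; }
};

struct Float32Encoder                       // fp32 passthrough
{
    float* data;                            // an fp16 encoder would narrow here
    void Set(float value) { *data = value; }
    void operator++() { ++data; }
};

int main()
{
    std::vector<uint8_t> input = { 0, 128, 255 };
    std::vector<float> output(input.size());

    QAsymm8Decoder decoder{ input.data(), 0.5f, 128 };
    Float32Encoder encoder{ output.data() };

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        encoder.Set(decoder.Get());         // dequantize one element
        ++encoder;
        ++decoder;
    }

    for (float v : output) { std::cout << v << ' '; }   // prints: -64 0 63.5
    std::cout << '\n';
}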
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt              |  2
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp              | 29
-rw-r--r--  src/backends/reference/workloads/Dequantize.hpp              | 20
-rw-r--r--  src/backends/reference/workloads/RefDequantizeWorkload.cpp   | 20
4 files changed, 58 insertions(+), 13 deletions(-)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 7844518620..29abfedcef 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -28,6 +28,8 @@ list(APPEND armnnRefBackendWorkloads_sources
DepthToSpace.hpp
DetectionPostProcess.cpp
DetectionPostProcess.hpp
+ Dequantize.cpp
+ Dequantize.hpp
ElementwiseFunction.cpp
ElementwiseFunction.hpp
Encoders.hpp
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
new file mode 100644
index 0000000000..fafc03e69b
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Dequantize.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder,
+ const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo)
+{
+ BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
+ {
+ // inputDecoder.Get() dequantizes the data element from whatever
+ // type is given by inputInfo to fp32 (if MakeDecoder supports that dequantization)
+ // outputEncoder.Set() transforms the data element to whatever type is
+ // given by outputInfo (if MakeEncoder supports that transformation)
+ outputEncoder.Set(inputDecoder.Get());
+ ++outputEncoder;
+ ++inputDecoder;
+ }
+}
+
+} // armnn namespace
\ No newline at end of file
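A hedged usage sketch of the new free function, mirroring how RefDequantizeWorkload::Execute() drives it further down. The shapes and quantization parameters here are made up, and the raw pointers stand in for mapped tensor handles:

// Sketch only: drives armnn::Dequantize the same way the workload does.
#include <armnn/Tensor.hpp>
#include "Decoders.hpp"
#include "Encoders.hpp"
#include "Dequantize.hpp"

void DequantizeToFp16Sketch(const void* inputData, void* outputData)
{
    using namespace armnn;

    // QAsymm8 input (scale 0.25, offset 10) dequantized to an fp16 output;
    // element counts must match, per the BOOST_ASSERT in Dequantize().
    TensorInfo inputInfo({ 1, 4 }, DataType::QuantisedAsymm8, 0.25f, 10);
    TensorInfo outputInfo({ 1, 4 }, DataType::Float16);

    auto inputDecoder  = MakeDecoder<float>(inputInfo, inputData);
    auto outputEncoder = MakeEncoder<float>(outputInfo, outputData);

    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
}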
diff --git a/src/backends/reference/workloads/Dequantize.hpp b/src/backends/reference/workloads/Dequantize.hpp
new file mode 100644
index 0000000000..c01b454833
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder,
+ const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index d861c50730..e6f5c6b359 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -5,6 +5,9 @@
#include "RefDequantizeWorkload.hpp"
#include "RefWorkloadUtils.hpp"
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "Dequantize.hpp"
namespace armnn
{
@@ -14,21 +17,12 @@ void RefDequantizeWorkload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- const DataType& inputDataType = inputInfo.GetDataType();
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- float* outputData = GetOutputTensorData<float>(0, m_Data);
+ auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- switch (inputDataType)
- {
- case DataType::QuantisedAsymm8:
- Dequantize<uint8_t>(GetInputTensorData<uint8_t>(0, m_Data), outputData, inputInfo);
- break;
- case DataType::QuantisedSymm16:
- Dequantize<int16_t>(GetInputTensorData<int16_t>(0, m_Data), outputData, inputInfo);
- break;
- default:
- throw InvalidArgumentException("RefDequantizeWorkload: Unsupported input data type");
- }
+ Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
}
} // namespace armnn
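The design point of the refactor above: the removed switch dispatched on the input DataType by hand and hard-coded fp32 output, whereas MakeDecoder/MakeEncoder now own that dispatch, so fp16 output needs no new case in Execute(). A standalone sketch (illustrative names, not ArmNN code) of that factory-dispatch shape:

// factory_dispatch_sketch.cpp -- type dispatch lives in one factory, so
// the execute loop stays generic; a new type only needs a new decoder.
#include <cstdint>
#include <memory>
#include <stdexcept>

enum class Dtype { QAsymm8, QSymm16 };

struct Decoder
{
    virtual ~Decoder() = default;
    virtual float Get() const = 0;
    virtual void Advance() = 0;
};

struct QAsymm8Decoder final : Decoder
{
    const uint8_t* p; float scale; int32_t offset;
    QAsymm8Decoder(const void* d, float s, int32_t o)
        : p(static_cast<const uint8_t*>(d)), scale(s), offset(o) {}
    float Get() const override { return scale * (static_cast<int32_t>(*p) - offset); }
    void Advance() override { ++p; }
};

struct QSymm16Decoder final : Decoder
{
    const int16_t* p; float scale;
    QSymm16Decoder(const void* d, float s)
        : p(static_cast<const int16_t*>(d)), scale(s) {}
    float Get() const override { return scale * static_cast<float>(*p); }
    void Advance() override { ++p; }
};

// The only place that names concrete types; callers stay type-agnostic.
std::unique_ptr<Decoder> MakeDecoderSketch(Dtype type, const void* data,
                                           float scale, int32_t offset)
{
    switch (type)
    {
        case Dtype::QAsymm8: return std::make_unique<QAsymm8Decoder>(data, scale, offset);
        case Dtype::QSymm16: return std::make_unique<QSymm16Decoder>(data, scale);
    }
    throw std::invalid_argument("unsupported data type");
}

int main()
{
    const int16_t raw[] = { -2, 0, 2 };
    auto dec = MakeDecoderSketch(Dtype::QSymm16, raw, 0.5f, 0);
    dec->Advance();
    dec->Advance();
    return dec->Get() == 1.0f ? 0 : 1;      // 0.5 * 2 == 1.0, exact in fp32
}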