author     Jim Flynn <jim.flynn@arm.com>                          2019-05-29 16:20:16 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2019-05-31 14:24:49 +0000
commit     983daec95bd3b3e5558771e995092981f2df2373 (patch)
tree       16884f1abb4dcbb2e3997a804f2b2a56c28db9fe
parent     120522aee38d756f9c963e1b5b4da12b66ac5a40 (diff)
download   armnn-983daec95bd3b3e5558771e995092981f2df2373.tar.gz
IVGCVSW-3147 Add CL Dequantization workload
Change-Id: I9baf6af3d98a26005a31075cd9c4e1f40938789b
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                  | 11
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp                  |  4
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp               |  6
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp               |  3
-rw-r--r--  src/backends/cl/backend.mk                          |  1
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp               |  7
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt            |  2
-rw-r--r--  src/backends/cl/workloads/ClDequantizeWorkload.cpp  | 55
-rw-r--r--  src/backends/cl/workloads/ClDequantizeWorkload.hpp  | 28
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp           |  1
10 files changed, 118 insertions(+), 0 deletions(-)
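For context (not part of the patch): the operation this workload offloads to Arm Compute Library's CLDequantizationLayer is standard asymmetric dequantization, real = scale * (quantized - offset). A minimal CPU-side sketch of that arithmetic, with an illustrative helper name that does not appear in the patch:

#include <cstdint>
#include <vector>

// Illustrative helper, not part of the patch: maps QASYMM8 values back to
// floating point using the tensor's quantization scale and zero-point offset.
std::vector<float> DequantizeQAsymm8(const std::vector<uint8_t>& quantized,
                                     float scale, int32_t offset)
{
    std::vector<float> real;
    real.reserve(quantized.size());
    for (uint8_t q : quantized)
    {
        real.push_back(scale * (static_cast<float>(q) - static_cast<float>(offset)));
    }
    return real;
}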
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 48953026a1..5582799ff6 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -23,6 +23,7 @@
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
+#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
@@ -274,6 +275,16 @@ bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
biases);
}
+bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+}
+
bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
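A minimal sketch (not part of the patch) of how a caller could exercise the new support check. The header path is an Arm NN-internal backend header, the DataType enum name (QuantisedAsymm8) is the one assumed for this era of the code base, and the class is assumed to be default-constructible in this source tree; shapes and quantization parameters are illustrative:

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <cl/ClLayerSupport.hpp> // internal backend header, path assumed

#include <iostream>
#include <string>

int main()
{
    // QASYMM8 input (scale 0.5, offset 128) dequantized to Float32 output.
    armnn::TensorInfo input({1, 4}, armnn::DataType::QuantisedAsymm8, 0.5f, 128);
    armnn::TensorInfo output({1, 4}, armnn::DataType::Float32);

    armnn::ClLayerSupport support;
    std::string reason;
    armnn::Optional<std::string&> reasonRef(reason);

    bool supported = support.IsDequantizeSupported(input, output, reasonRef);
    std::cout << (supported ? "supported" : reason) << std::endl;
    return 0;
}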
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index f8a9a96f0f..35c00f9176 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -59,6 +59,10 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index a1f2aff4d9..70a4ddad4d 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -198,6 +198,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<ClDequantizeWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index c1216cbac0..c84ce13b8b 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -73,6 +73,9 @@ public:
std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index f5994c74ad..9bd37cc483 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -30,6 +30,7 @@ BACKEND_SOURCES := \
workloads/ClConvertFp32ToFp16Workload.cpp \
workloads/ClConvolution2dWorkload.cpp \
workloads/ClDepthwiseConvolutionWorkload.cpp \
+ workloads/ClDequantizeWorkload.cpp \
workloads/ClDivisionFloatWorkload.cpp \
workloads/ClFloorFloatWorkload.cpp \
workloads/ClFullyConnectedWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index ad87e953c8..76b8175f25 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -436,6 +436,13 @@ ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
+// Dequantize
+// NOTE: current clframework (46a49a0a8206f0efa7afd514940e180a88ffd732)
+// CLDequantizationLayerKernel accepts DataType::QASYMM8 input
+// and can output DataType::F16 or DataType::F32
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+
// ============================================================================
// COMPARE tests
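Reflecting the NOTE in the hunk above (and not part of the patch), a hedged sketch of validating a QASYMM8 -> Float32 dequantization directly against the new validate helper; the include path is an internal backend path and the DataType enum name and tensor parameters are assumptions for this source tree:

#include <armnn/Tensor.hpp>

#include <cl/workloads/ClDequantizeWorkload.hpp> // internal backend header, path assumed

#include <arm_compute/core/Error.h>

#include <iostream>

int main()
{
    // QASYMM8 input (scale 0.5, offset 1), Float32 output, per the NOTE above.
    armnn::TensorInfo input({2, 4}, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
    armnn::TensorInfo output({2, 4}, armnn::DataType::Float32);

    arm_compute::Status status = armnn::ClDequantizeWorkloadValidate(input, output);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        std::cout << "Not supported: " << status.error_description() << std::endl;
    }
    return 0;
}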
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 23668c564b..c055244b1f 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -24,6 +24,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClConvolution2dWorkload.hpp
ClDepthwiseConvolutionWorkload.cpp
ClDepthwiseConvolutionWorkload.hpp
+ ClDequantizeWorkload.cpp
+ ClDequantizeWorkload.hpp
ClDivisionFloatWorkload.cpp
ClDivisionFloatWorkload.hpp
ClFloorFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
new file mode 100644
index 0000000000..67a555a020
--- /dev/null
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClDequantizeWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <arm_compute/core/Types.h>
+
+#include <cl/ClLayerSupport.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+#include <boost/polymorphic_pointer_cast.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ return arm_compute::CLDequantizationLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
+ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& workloadInfo)
+ : BaseWorkload<DequantizeQueueDescriptor>(descriptor, workloadInfo)
+{
+ arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ m_Data.m_Inputs[0])->GetTensor();
+
+ arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ m_Data.m_Outputs[0])->GetTensor();
+
+ m_Layer.reset(new arm_compute::CLDequantizationLayer());
+ m_Layer->configure(&input, &output);
+ m_Layer->prepare();
+}
+
+void ClDequantizeWorkload::Execute() const
+{
+ if (m_Layer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClDequantizeWorkload_Execute");
+ m_Layer->run();
+ }
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.hpp b/src/backends/cl/workloads/ClDequantizeWorkload.hpp
new file mode 100644
index 0000000000..6e61da2ebf
--- /dev/null
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/functions/CLDequantizationLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClDequantizeWorkload : public BaseWorkload<DequantizeQueueDescriptor>
+{
+public:
+ ClDequantizeWorkload(const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& workloadInfo);
+
+ void Execute() const override;
+
+private:
+ mutable std::unique_ptr<arm_compute::CLDequantizationLayer> m_Layer;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 0060412770..b836781166 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -11,6 +11,7 @@
#include "ClBatchToSpaceNdWorkload.hpp"
#include "ClConvolution2dWorkload.hpp"
#include "ClDepthwiseConvolutionWorkload.hpp"
+#include "ClDequantizeWorkload.hpp"
#include "ClDivisionFloatWorkload.hpp"
#include "ClFloorFloatWorkload.hpp"
#include "ClFullyConnectedWorkload.hpp"