aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2022-01-18 22:09:29 +0000
committerTeresa Charlin <teresa.charlinreyes@arm.com>2022-01-26 14:38:27 +0000
commit98b0dcb7f285f0009aee52cf526e4bfacd558d6d (patch)
tree0490ed7bedc9774c5dcaa134534d5804e4e33763
parent524a99ccf69b4bf5c8188bc9e4d89f402c374cae (diff)
downloadarmnn-98b0dcb7f285f0009aee52cf526e4bfacd558d6d.tar.gz
IVGCVSW-6683-6684 Add ClBaseWorkload and NeonBaseWorkload
* Neon/Cl Activation workloads inherit from Cl/Neon BaseWorkload * Unit Test for ReplaceTensorHandle functions Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I985e34b93a96405735402a6d3b947957afbe2857
-rw-r--r--src/armnnTestUtils/CreateWorkload.hpp4
-rw-r--r--src/backends/cl/test/ClCreateWorkloadTests.cpp32
-rw-r--r--src/backends/cl/workloads/ClActivationWorkload.cpp4
-rw-r--r--src/backends/cl/workloads/ClActivationWorkload.hpp6
-rw-r--r--src/backends/cl/workloads/ClBaseWorkload.hpp40
-rw-r--r--src/backends/neon/test/NeonCreateWorkloadTests.cpp34
-rw-r--r--src/backends/neon/workloads/NeonActivationWorkload.cpp4
-rw-r--r--src/backends/neon/workloads/NeonActivationWorkload.hpp7
-rw-r--r--src/backends/neon/workloads/NeonBaseWorkload.hpp40
9 files changed, 158 insertions, 13 deletions
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 15de5b5ddb..d01919c09d 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -68,7 +68,7 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
{
// Creates the layer we're testing.
ActivationDescriptor layerDesc;
- layerDesc.m_Function = ActivationFunction::Abs;
+ layerDesc.m_Function = ActivationFunction::ReLu;
layerDesc.m_A = 3.5f;
layerDesc.m_B = -10.0f;
@@ -94,7 +94,7 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
CHECK(queueDescriptor.m_Outputs.size() == 1);
CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
- CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+ CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::ReLu));
// Returns so we can do extra, backend-specific tests.
return workload;
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 34914fca50..d8b2d4f786 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1297,4 +1297,36 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQuantizedLstmWorkload")
ClCreateQuantizedLstmWorkloadTest<ClQuantizedLstmWorkload>();
}
+template <armnn::DataType DataType>
+static void ClCreateActivationWorkloadReplaceFunctionsTest()
+{
+ std::shared_ptr<ClMemoryManager> memoryManager = std::make_shared<ClMemoryManager>(
+ std::make_unique<arm_compute::CLBufferAllocator>());
+
+ Graph graph;
+ ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(memoryManager);
+ // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+ auto workloadPtr = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handlers are created and then replaced in the workload
+ const ClTensorHandleFactory tensorHandleFactory(memoryManager);
+ TensorInfo inputInfo({2 , 2}, DataType::Float16);
+ TensorInfo outputInfo({2 , 2}, DataType::Float16);
+ unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo, true);
+ inputHandle->Manage();
+ inputHandle->Allocate();
+ unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo, true);
+ outputHandle->Manage();
+ outputHandle->Allocate();
+
+ unsigned int slot = 0;
+ CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+ CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("ClReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+ ClCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
}
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 91a44f430a..a92f8fb573 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext)
- : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+ : ClBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
{
// Report Profiling Details
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClActivationWorkload_Construct",
diff --git a/src/backends/cl/workloads/ClActivationWorkload.hpp b/src/backends/cl/workloads/ClActivationWorkload.hpp
index 683229e1f3..14835fb40b 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.hpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.hpp
@@ -1,11 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/backends/Workload.hpp>
+#include "ClBaseWorkload.hpp"
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
@@ -15,7 +15,7 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor);
-class ClActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class ClActivationWorkload : public ClBaseWorkload<ActivationQueueDescriptor>
{
public:
ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClBaseWorkload.hpp b/src/backends/cl/workloads/ClBaseWorkload.hpp
new file mode 100644
index 0000000000..e74fc84f4f
--- /dev/null
+++ b/src/backends/cl/workloads/ClBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/Workload.hpp>
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class ClBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+ ClBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<QueueDescriptor>(descriptor, info)
+ {}
+
+ // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ Reconfigure();
+ }
+
+ // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Outputs[slot] = tensorHandle;
+ Reconfigure();
+ }
+
+ // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+ virtual void Reconfigure()
+ {
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+ }
+};
+} //namespace armnn
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c1563fe046..66718cc481 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1059,4 +1059,38 @@ TEST_CASE("CreateQLstmWorkloadTest")
NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
}
+template <armnn::DataType DataType>
+static void NeonCreateActivationWorkloadReplaceFunctionsTest()
+{
+ shared_ptr<NeonMemoryManager> memoryManager = make_shared<NeonMemoryManager>();
+
+ Graph graph;
+ NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(memoryManager);
+ // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+ auto workloadPtr = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handlers are created and then replaced in the workload
+ const NeonTensorHandleFactory tensorHandleFactory(memoryManager);
+ TensorInfo inputInfo({2 , 2}, DataType::Float16);
+ TensorInfo outputInfo({2 , 2}, DataType::Float16);
+ unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ inputHandle->Allocate();
+ unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+ outputHandle->Allocate();
+
+ unsigned int slot = 0;
+ CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+ CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("NeonReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+ NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
+TEST_CASE("NeonReplaceFunctionsfromUint8toFloat16ActivationWorkload")
+{
+ NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::QAsymmU8>();
+}
+
}
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index dd4c97d76b..0fadc120ba 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,7 @@ arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info)
- : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+ : NeonBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
{
// Report Profiling Details
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonActivationWorkload_Construct",
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.hpp b/src/backends/neon/workloads/NeonActivationWorkload.hpp
index c3d6cc1bce..72ad477834 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.hpp
@@ -1,23 +1,22 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/backends/Workload.hpp>
+#include "NeonBaseWorkload.hpp"
#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
namespace armnn
{
-
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor);
-class NeonActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class NeonActivationWorkload : public NeonBaseWorkload<ActivationQueueDescriptor>
{
public:
NeonActivationWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
diff --git a/src/backends/neon/workloads/NeonBaseWorkload.hpp b/src/backends/neon/workloads/NeonBaseWorkload.hpp
new file mode 100644
index 0000000000..a92f35a173
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/backends/Workload.hpp>
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class NeonBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+ NeonBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<QueueDescriptor>(descriptor, info)
+ {}
+
+ // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ Reconfigure();
+ }
+
+ // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+ {
+ this->m_Data.m_Outputs[slot] = tensorHandle;
+ Reconfigure();
+ }
+
+ // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+ virtual void Reconfigure()
+ {
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+ }
+};
+} //namespace armnn