From 98b0dcb7f285f0009aee52cf526e4bfacd558d6d Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Tue, 18 Jan 2022 22:09:29 +0000
Subject: IVGCVSW-6683-6684 Add ClBaseWorkload and NeonBaseWorkload

* Neon/Cl Activation workloads inherit from Neon/Cl BaseWorkload
* Unit Test for ReplaceTensorHandle functions

Signed-off-by: Teresa Charlin
Change-Id: I985e34b93a96405735402a6d3b947957afbe2857
---
 src/armnnTestUtils/CreateWorkload.hpp               |  4 +--
 src/backends/cl/test/ClCreateWorkloadTests.cpp      | 32 +++++++++++++++++
 src/backends/cl/workloads/ClActivationWorkload.cpp  |  4 +--
 src/backends/cl/workloads/ClActivationWorkload.hpp  |  6 ++--
 src/backends/cl/workloads/ClBaseWorkload.hpp        | 40 ++++++++++++++++++++++
 src/backends/neon/test/NeonCreateWorkloadTests.cpp  | 34 ++++++++++++++++++
 .../neon/workloads/NeonActivationWorkload.cpp       |  4 +--
 .../neon/workloads/NeonActivationWorkload.hpp       |  7 ++--
 src/backends/neon/workloads/NeonBaseWorkload.hpp    | 40 ++++++++++++++++++++++
 9 files changed, 158 insertions(+), 13 deletions(-)
 create mode 100644 src/backends/cl/workloads/ClBaseWorkload.hpp
 create mode 100644 src/backends/neon/workloads/NeonBaseWorkload.hpp
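Usage sketch (illustrative only, not part of this change): the new base classes swap the
handle held in m_Data and call Reconfigure(), which by default throws
armnn::UnimplementedException, so a workload only supports runtime tensor-handle
replacement once it overrides Reconfigure(). The class below shows the intended override
pattern. ClExampleWorkload, ExampleQueueDescriptor and the body of Reconfigure() are
hypothetical and exist only for illustration; the unit tests added here exercise the
default (throwing) path through ClActivationWorkload and NeonActivationWorkload, which do
not override Reconfigure().

    // Hypothetical workload, assuming an ExampleQueueDescriptor exists.
    class ClExampleWorkload : public ClBaseWorkload<ExampleQueueDescriptor>
    {
    public:
        ClExampleWorkload(const ExampleQueueDescriptor& descriptor, const WorkloadInfo& info)
            : ClBaseWorkload<ExampleQueueDescriptor>(descriptor, info)
        {
            Reconfigure(); // initial configuration from m_Data
        }

        void Execute() const override
        {
            // Run the arm_compute function configured in Reconfigure().
        }

        // Rebuild backend state from the (possibly replaced) tensor handles so that
        // ReplaceInputTensorHandle/ReplaceOutputTensorHandle no longer throw.
        void Reconfigure() override
        {
            // e.g. call the arm_compute configure() step again using the handles
            // currently held in this->m_Data.m_Inputs / this->m_Data.m_Outputs.
        }
    };

Defaulting Reconfigure() to a throw keeps the behaviour of every existing Cl and Neon
workload unchanged while letting individual workloads opt in one at a time.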
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 15de5b5ddb..d01919c09d 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -68,7 +68,7 @@ std::unique_ptr CreateActivationWorkloadTest(armnn::IWorkloa
 {
     // Creates the layer we're testing.
     ActivationDescriptor layerDesc;
-    layerDesc.m_Function = ActivationFunction::Abs;
+    layerDesc.m_Function = ActivationFunction::ReLu;
     layerDesc.m_A = 3.5f;
     layerDesc.m_B = -10.0f;
 
@@ -94,7 +94,7 @@ std::unique_ptr CreateActivationWorkloadTest(armnn::IWorkloa
     CHECK(queueDescriptor.m_Outputs.size() == 1);
     CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
     CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
-    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::ReLu));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 34914fca50..d8b2d4f786 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1297,4 +1297,36 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQuantizedLstmWorkload")
     ClCreateQuantizedLstmWorkloadTest();
 }
 
+template <armnn::DataType DataType>
+static void ClCreateActivationWorkloadReplaceFunctionsTest()
+{
+    std::shared_ptr<ClMemoryManager> memoryManager = std::make_shared<ClMemoryManager>(
+        std::make_unique<arm_compute::CLBufferAllocator>());
+
+    Graph graph;
+    ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(memoryManager);
+    // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+    auto workloadPtr = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handles are created and then replaced in the workload
+    const ClTensorHandleFactory tensorHandleFactory(memoryManager);
+    TensorInfo inputInfo({2 , 2}, DataType::Float16);
+    TensorInfo outputInfo({2 , 2}, DataType::Float16);
+    unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo, true);
+    inputHandle->Manage();
+    inputHandle->Allocate();
+    unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo, true);
+    outputHandle->Manage();
+    outputHandle->Allocate();
+
+    unsigned int slot = 0;
+    CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+    CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("ClReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+    ClCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
 }
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 91a44f430a..a92f8fb573 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,7 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
 ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
                                            const WorkloadInfo& info,
                                            const arm_compute::CLCompileContext& clCompileContext)
-    : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+    : ClBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
 {
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClActivationWorkload_Construct",
diff --git a/src/backends/cl/workloads/ClActivationWorkload.hpp b/src/backends/cl/workloads/ClActivationWorkload.hpp
index 683229e1f3..14835fb40b 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.hpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.hpp
@@ -1,11 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
-#include 
+#include "ClBaseWorkload.hpp"
 
 #include 
 
@@ -15,7 +15,7 @@ arm_compute::Status ClActivationWorkloadValidate(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const ActivationDescriptor& descriptor);
 
-class ClActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class ClActivationWorkload : public ClBaseWorkload<ActivationQueueDescriptor>
 {
 public:
     ClActivationWorkload(const ActivationQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClBaseWorkload.hpp b/src/backends/cl/workloads/ClBaseWorkload.hpp
new file mode 100644
index 0000000000..e74fc84f4f
--- /dev/null
+++ b/src/backends/cl/workloads/ClBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include 
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class ClBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+    ClBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<QueueDescriptor>(descriptor, info)
+    {}
+
+    // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Inputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Outputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+    virtual void Reconfigure()
+    {
+        throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+    }
+};
+} //namespace armnn
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c1563fe046..66718cc481 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1059,4 +1059,38 @@ TEST_CASE("CreateQLstmWorkloadTest")
     NeonCreateQLstmWorkloadTest();
 }
 
+template <armnn::DataType DataType>
+static void NeonCreateActivationWorkloadReplaceFunctionsTest()
+{
+    shared_ptr<NeonMemoryManager> memoryManager = make_shared<NeonMemoryManager>();
+
+    Graph graph;
+    NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(memoryManager);
+    // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType)
+    auto workloadPtr = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
+
+    // new input and output tensor handles are created and then replaced in the workload
+    const NeonTensorHandleFactory tensorHandleFactory(memoryManager);
+    TensorInfo inputInfo({2 , 2}, DataType::Float16);
+    TensorInfo outputInfo({2 , 2}, DataType::Float16);
+    unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+    inputHandle->Allocate();
+    unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+    outputHandle->Allocate();
+
+    unsigned int slot = 0;
+    CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
+    CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
+}
+
+TEST_CASE("NeonReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
+{
+    NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
+}
+
+TEST_CASE("NeonReplaceFunctionsfromUint8toFloat16ActivationWorkload")
+{
+    NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::QAsymmU8>();
+}
+
 }
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index dd4c97d76b..0fadc120ba 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,7 @@ arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
 
 NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor,
                                                const WorkloadInfo& info)
-    : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
+    : NeonBaseWorkload<ActivationQueueDescriptor>(descriptor, info)
 {
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonActivationWorkload_Construct",
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.hpp b/src/backends/neon/workloads/NeonActivationWorkload.hpp
index c3d6cc1bce..72ad477834 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.hpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.hpp
@@ -1,23 +1,22 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
-#include 
+#include "NeonBaseWorkload.hpp"
 
 #include 
 #include 
 
 namespace armnn
 {
-
 arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const ActivationDescriptor& descriptor);
 
-class NeonActivationWorkload : public BaseWorkload<ActivationQueueDescriptor>
+class NeonActivationWorkload : public NeonBaseWorkload<ActivationQueueDescriptor>
 {
 public:
     NeonActivationWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info);
diff --git a/src/backends/neon/workloads/NeonBaseWorkload.hpp b/src/backends/neon/workloads/NeonBaseWorkload.hpp
new file mode 100644
index 0000000000..a92f35a173
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBaseWorkload.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include 
+
+namespace armnn
+{
+template <typename QueueDescriptor>
+class NeonBaseWorkload : public BaseWorkload<QueueDescriptor>
+{
+public:
+    NeonBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<QueueDescriptor>(descriptor, info)
+    {}
+
+    // Replace input tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Inputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Replace output tensor handle with the given TensorHandle and call Reconfigure()
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override
+    {
+        this->m_Data.m_Outputs[slot] = tensorHandle;
+        Reconfigure();
+    }
+
+    // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default.
+    virtual void Reconfigure()
+    {
+        throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+    }
+};
+} //namespace armnn
-- 
cgit v1.2.1