diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-01-18 22:09:29 +0000 |
---|---|---|
committer | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-01-26 14:38:27 +0000 |
commit | 98b0dcb7f285f0009aee52cf526e4bfacd558d6d (patch) | |
tree | 0490ed7bedc9774c5dcaa134534d5804e4e33763 /src/backends/neon | |
parent | 524a99ccf69b4bf5c8188bc9e4d89f402c374cae (diff) | |
download | armnn-98b0dcb7f285f0009aee52cf526e4bfacd558d6d.tar.gz |
IVGCVSW-6683-6684 Add ClBaseWorkload and NeonBaseWorkload
* Neon/Cl Activation workloads inherit from Neon/Cl BaseWorkload respectively
* Unit Test for ReplaceTensorHandle functions
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I985e34b93a96405735402a6d3b947957afbe2857
Diffstat (limited to 'src/backends/neon')
4 files changed, 79 insertions, 6 deletions
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index c1563fe046..66718cc481 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -1059,4 +1059,38 @@ TEST_CASE("CreateQLstmWorkloadTest") NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>(); } +template <armnn::DataType DataType> +static void NeonCreateActivationWorkloadReplaceFunctionsTest() +{ + shared_ptr<NeonMemoryManager> memoryManager = make_shared<NeonMemoryManager>(); + + Graph graph; + NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(memoryManager); + // input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType) + auto workloadPtr = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph); + + // new input and output tensor handlers are created and then replace in the workload + const NeonTensorHandleFactory tensorHandleFactory(memoryManager); + TensorInfo inputInfo({2 , 2}, DataType::Float16); + TensorInfo outputInfo({2 , 2}, DataType::Float16); + unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); + inputHandle->Allocate(); + unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); + outputHandle->Allocate(); + + unsigned int slot = 0; + CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException); + CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException); +} + +TEST_CASE("NeonReplaceFunctionsfromFloat32toFloat16ActivationWorkload") +{ + NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>(); +} + +TEST_CASE("NeonReplaceFunctionsfromUint8toFloat16ActivationWorkload") +{ + NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::QAsymmU8>(); +} + } diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp 
b/src/backends/neon/workloads/NeonActivationWorkload.cpp index dd4c97d76b..0fadc120ba 100644 --- a/src/backends/neon/workloads/NeonActivationWorkload.cpp +++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -31,7 +31,7 @@ arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input, NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) - : BaseWorkload<ActivationQueueDescriptor>(descriptor, info) + : NeonBaseWorkload<ActivationQueueDescriptor>(descriptor, info) { // Report Profiling Details ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonActivationWorkload_Construct", diff --git a/src/backends/neon/workloads/NeonActivationWorkload.hpp b/src/backends/neon/workloads/NeonActivationWorkload.hpp index c3d6cc1bce..72ad477834 100644 --- a/src/backends/neon/workloads/NeonActivationWorkload.hpp +++ b/src/backends/neon/workloads/NeonActivationWorkload.hpp @@ -1,23 +1,22 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once -#include <armnn/backends/Workload.hpp> +#include "NeonBaseWorkload.hpp" #include <arm_compute/core/Error.h> #include <arm_compute/runtime/IFunction.h> namespace armnn { - arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor); -class NeonActivationWorkload : public BaseWorkload<ActivationQueueDescriptor> +class NeonActivationWorkload : public NeonBaseWorkload<ActivationQueueDescriptor> { public: NeonActivationWorkload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info); diff --git a/src/backends/neon/workloads/NeonBaseWorkload.hpp b/src/backends/neon/workloads/NeonBaseWorkload.hpp new file mode 100644 index 0000000000..a92f35a173 --- /dev/null +++ b/src/backends/neon/workloads/NeonBaseWorkload.hpp @@ -0,0 +1,40 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <armnn/backends/Workload.hpp> + +namespace armnn +{ +template <typename QueueDescriptor> +class NeonBaseWorkload : public BaseWorkload<QueueDescriptor> +{ +public: + NeonBaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<QueueDescriptor>(descriptor, info) + {} + + // Replace input tensor handle with the given TensorHandle and call Reconfigure() + void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override + { + this->m_Data.m_Inputs[slot] = tensorHandle; + Reconfigure(); + } + + // Replace output tensor handle with the given TensorHandle and call Reconfigure() + void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override + { + this->m_Data.m_Outputs[slot] = tensorHandle; + Reconfigure(); + } + + // Reconfigure the workload configuration. Throw armnn::UnimplementedException by default. 
+ virtual void Reconfigure() + { + throw armnn::UnimplementedException("Reconfigure not implemented for this workload"); + } +}; +} //namespace armnn