diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-11-01 15:59:50 +0000 |
---|---|---|
committer | TeresaARM <teresa.charlinreyes@arm.com> | 2023-05-08 13:16:01 +0000 |
commit | 1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45 (patch) | |
tree | bbb846edda64445c1e033b182e5a079c8d5728d8 /src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp | |
parent | c52190a7e80cf238ba1d8630e5cc36ec7c7849e2 (diff) | |
download | armnn-1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45.tar.gz |
IVGCVSW-7307 Add CpuAcc Batch MatMul Workload
* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst) as per request of ACL
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp')
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp | 20 |
1 file changed, 18 insertions, 2 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp index 74bd97f103..504ca1d304 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -14,6 +14,7 @@ #include <armnnUtils/QuantizeHelper.hpp> #include <armnnTestUtils/TensorCopyUtils.hpp> #include <armnn/Optional.hpp> +#include <armnn/BackendHelper.hpp> template<armnn::DataType ArmnnType, typename T, std::size_t NumDims> @@ -29,6 +30,7 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl( const armnn::TensorInfo& inputYInfo, const armnn::TensorInfo& outputInfo) { + LayerTestResult<T, NumDims> result(outputInfo); std::vector<T> outputActual(outputInfo.GetNumElements()); std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo); @@ -36,13 +38,27 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl( std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); armnn::BatchMatMulQueueDescriptor queueDescriptor; - queueDescriptor.m_Parameters = descriptor; + queueDescriptor.m_Parameters = std::move(descriptor); armnn::WorkloadInfo workloadInfo; AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get()); AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get()); AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get()); + // Don't execute if BatchMatMul is not supported, as an exception will be raised. 
+ const armnn::BackendId& backend = workloadFactory.GetBackendId(); + std::string reasonIfUnsupported; + armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend); + result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo, + inputYInfo, + outputInfo, + queueDescriptor.m_Parameters, + reasonIfUnsupported); + if (!result.m_Supported) + { + return result; + } + auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo); inputXHandle->Allocate(); |