From 1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Tue, 1 Nov 2022 15:59:50 +0000
Subject: IVGCVSW-7307 Add CpuAcc Batch MatMul Workload

* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst) as per request of ACL

Signed-off-by: Teresa Charlin
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
---
 .../test/layerTests/BatchMatMulTestImpl.cpp        | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

(limited to 'src/backends/backendsCommon/test/layerTests')

diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
index 74bd97f103..504ca1d304 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <armnn/BackendHelper.hpp>
 
 template
 
@@ -29,6 +30,7 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     const armnn::TensorInfo& inputXInfo,
     const armnn::TensorInfo& inputYInfo,
     const armnn::TensorInfo& outputInfo)
 {
+    LayerTestResult<T, NumDims> result(outputInfo);
     std::vector<T> outputActual(outputInfo.GetNumElements());
     std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
@@ -36,13 +38,27 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::BatchMatMulQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Parameters = std::move(descriptor);
     armnn::WorkloadInfo workloadInfo;
 
     AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
     AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
+    // Don't execute if BatchMatMul is not supported, as an exception will be raised.
+    const armnn::BackendId& backend = workloadFactory.GetBackendId();
+    std::string reasonIfUnsupported;
+    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+    result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo,
+                                                       inputYInfo,
+                                                       outputInfo,
+                                                       queueDescriptor.m_Parameters,
+                                                       reasonIfUnsupported);
+    if (!result.m_Supported)
+    {
+        return result;
+    }
+
     auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);
 
     inputXHandle->Allocate();
--
cgit v1.2.1
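
Note (not part of the patch): the test change above gates execution on a backend support query made through armnn::BackendHelper. The sketch below shows roughly how a standalone caller could make the same IsBatchMatMulSupported query against the CpuAcc backend before building a workload, and marks the input TensorInfos non-constant, matching the commit note that the ACL MatMul kernel expects dynamic tensors. The shapes, data type, and variable names are illustrative assumptions, not taken from this commit, and the program assumes an ArmNN build with the CpuAcc (Neon) backend enabled.

// Minimal sketch, not part of this patch: probe CpuAcc for BatchMatMul support
// before creating a workload, mirroring the check added in BatchMatMulTestImpl.
// Shapes, data type and names below are illustrative assumptions.
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>

#include <iostream>
#include <string>

int main()
{
    // 3D batched inputs: X is [2,2,3], Y is [2,3,4], so the product is [2,2,4].
    armnn::TensorInfo inputXInfo({2, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo inputYInfo({2, 3, 4}, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({2, 2, 4}, armnn::DataType::Float32);

    // The commit message says ACL wants dynamic (non-const) tensors,
    // so mark the inputs as non-constant explicitly.
    inputXInfo.SetConstant(false);
    inputYInfo.SetConstant(false);

    // Default descriptor: no transpose or adjoint applied to either input.
    armnn::BatchMatMulDescriptor descriptor;

    // Look up the layer-support handle for the CpuAcc backend.
    armnn::LayerSupportHandle handle =
        armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuAcc"));
    if (!handle.IsBackendRegistered())
    {
        std::cout << "CpuAcc backend is not registered in this build" << std::endl;
        return 1;
    }

    // Same query the test uses to decide whether to run the workload.
    std::string reasonIfUnsupported;
    bool supported = handle.IsBatchMatMulSupported(inputXInfo,
                                                   inputYInfo,
                                                   outputInfo,
                                                   descriptor,
                                                   reasonIfUnsupported);

    std::cout << (supported ? std::string("BatchMatMul is supported on CpuAcc")
                            : "BatchMatMul not supported: " + reasonIfUnsupported)
              << std::endl;
    return 0;
}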