From 1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Tue, 1 Nov 2022 15:59:50 +0000
Subject: IVGCVSW-7307 Add CpuAcc Batch MatMul Workload

* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst) as per request of ACL

Signed-off-by: Teresa Charlin
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
---
 .../test/BatchMatMulEndToEndTestImpl.hpp      | 42 +++++++++++++---------
 .../test/layerTests/BatchMatMulTestImpl.cpp   | 20 +++++++++--
 2 files changed, 43 insertions(+), 19 deletions(-)

(limited to 'src/backends/backendsCommon/test')

diff --git a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
index 905a56d53a..98e75cb8df 100644
--- a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -55,30 +55,38 @@ void BatchMatMulEndToEnd(const std::vector<armnn::BackendId>& backends)
     const TensorShape& inputYShape = { 2, 2, 2 };
     const TensorShape& outputShape = { 2, 2, 2 };
 
-    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+    constexpr float   qScale  = 1.0f;
+    constexpr int32_t qOffset = 0;
+
+    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape, qScale, qOffset);
 
     CHECK(network);
 
-    std::vector<T> inputXData{ 1, 2,
-                               3, 4,
+    std::vector<float> floatInputXData{ 1., 2.,
+                                        3., 4.,
+
+                                        9., 10.,
+                                        11., 12. };
+    std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+    std::vector<float> floatInputYData{ 5., 7.,
+                                        6., 8.,
 
-                               9, 10,
-                               11, 12 };
-    std::vector<T> inputYData{ 5, 7,
-                               6, 8,
+                                        13., 15.,
+                                        14., 16. };
+    std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
 
-                               13, 15,
-                               14, 16 };
-    std::vector<T> expectedOutput{ 19, 22,
-                                   43, 50,
+    std::vector<float> floatExpectedOutputData{ 19., 22.,
+                                                43., 50.,
 
-                                   267, 286,
-                                   323, 346 };
+                                                267., 286.,
+                                                323., 346. };
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
 
-    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, {1, inputYData}};
-    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+    std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+    std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
 
-    EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
index 74bd97f103..504ca1d304 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include <armnn/BackendHelper.hpp>
 
 template
 
@@ -29,6 +30,7 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     const armnn::TensorInfo& inputYInfo,
     const armnn::TensorInfo& outputInfo)
 {
+    LayerTestResult<T, NumDims> result(outputInfo);
     std::vector<T> outputActual(outputInfo.GetNumElements());
 
     std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
@@ -36,13 +38,27 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::BatchMatMulQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Parameters = std::move(descriptor);
     armnn::WorkloadInfo workloadInfo;
 
     AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
     AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
+    // Don't execute if BatchMatMul is not supported, as an exception will be raised.
+    const armnn::BackendId& backend = workloadFactory.GetBackendId();
+    std::string reasonIfUnsupported;
+    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+    result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo,
+                                                       inputYInfo,
+                                                       outputInfo,
+                                                       queueDescriptor.m_Parameters,
+                                                       reasonIfUnsupported);
+    if (!result.m_Supported)
+    {
+        return result;
+    }
+
     auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);
 
     inputXHandle->Allocate();
--
cgit v1.2.1
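
A note on the reference data in BatchMatMulEndToEndTestImpl.hpp: the expected outputs are consistent with each batch computing X * transpose(Y) for the operands as stored. For the second batch, X = [9 10; 11 12] and Y as stored = [13 15; 14 16], and the expected [267 286; 323 346] follows from 267 = 9*13 + 10*15, 286 = 9*14 + 10*16, 323 = 11*13 + 12*15 and 346 = 11*14 + 12*16 (likewise 19 = 1*5 + 2*7 in the first batch). Presumably CreateBatchMatMulNetwork, which is not shown in this diff, configures the BatchMatMulDescriptor's transpose option on Y accordingly.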
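
The end-to-end test quantizes its float reference data through armnnUtils::QuantizedVector so the same values drive both the float and the new int8 runs. Below is a minimal self-contained sketch of that per-element affine quantization scheme; QuantizeReference is a hypothetical stand-in for illustration, not the library's implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Affine quantization: q = round(v / scale) + offset, clamped to T's range.
    template <typename T>
    std::vector<T> QuantizeReference(const std::vector<float>& values, float qScale, int32_t qOffset)
    {
        std::vector<T> quantized;
        quantized.reserve(values.size());
        for (float v : values)
        {
            int64_t q = static_cast<int64_t>(std::lround(v / qScale)) + qOffset;
            q = std::clamp<int64_t>(q,
                                    static_cast<int64_t>(std::numeric_limits<T>::lowest()),
                                    static_cast<int64_t>(std::numeric_limits<T>::max()));
            quantized.push_back(static_cast<T>(q));
        }
        return quantized;
    }

With qScale = 1.0f and qOffset = 0, as the test uses, the mapping is the identity up to clamping at the target type's limits, so QuantizeReference<int8_t>({19.f, 22.f}, 1.0f, 0) yields {19, 22} and the one set of float reference values serves both data types.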
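
The commit message's "Add int8 tests" bullet implies that backend test suites instantiate this template for quantized types. A registration for the CpuAcc backend might look like the sketch below; the test name and the doctest TEST_CASE placement are assumptions, not taken from this diff.

    // Hypothetical registration in a Neon end-to-end test suite (names assumed):
    TEST_CASE("NeonBatchMatMulEndToEndInt8Test")
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
        BatchMatMulEndToEnd<armnn::DataType::QAsymmS8>(backends);
    }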