author    Teresa Charlin <teresa.charlinreyes@arm.com>  2022-11-01 15:59:50 +0000
committer TeresaARM <teresa.charlinreyes@arm.com>       2023-05-08 13:16:01 +0000
commit    1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45 (patch)
tree      bbb846edda64445c1e033b182e5a079c8d5728d8 /src/backends/backendsCommon
parent    c52190a7e80cf238ba1d8630e5cc36ec7c7849e2 (diff)
download  armnn-1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45.tar.gz
IVGCVSW-7307 Add CpuAcc Batch MatMul Workload
* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst) as per request of ACL

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
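For context on the first bullet, a minimal sketch (not the workload code added by this patch) of the validate-then-configure flow for the Compute Library's dedicated MatMul function, with the operands forced to be dynamic as the commit message describes. The 2x2x2 float32 shapes are borrowed from the tests below; default MatMulInfo and CpuMatMulSettings are assumptions of the example.

// Sketch: arm_compute::NEMatMul with inputs marked non-constant, mirroring
// the "force tensors to be dynamic" note in the commit message above.
#include <arm_compute/core/Error.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/runtime/NEON/functions/NEMatMul.h>
#include <arm_compute/runtime/Tensor.h>

int main()
{
    using namespace arm_compute;

    // 2x2x2 batched operands, matching the end-to-end test data below.
    TensorInfo lhsInfo(TensorShape(2U, 2U, 2U), 1, DataType::F32);
    TensorInfo rhsInfo(TensorShape(2U, 2U, 2U), 1, DataType::F32);
    TensorInfo dstInfo(TensorShape(2U, 2U, 2U), 1, DataType::F32);

    // ACL's MatMul kernel expects its operands to be dynamic (non-constant).
    lhsInfo.set_are_values_constant(false);
    rhsInfo.set_are_values_constant(false);

    MatMulInfo matMulInfo;        // no adjoint/transpose on either operand
    CpuMatMulSettings settings;   // default settings

    // Query support before configuring, as the backend may reject the case.
    Status status = NEMatMul::validate(&lhsInfo, &rhsInfo, &dstInfo, matMulInfo, settings);
    if (status.error_code() != ErrorCode::OK)
    {
        return 1; // unsupported on this target
    }

    Tensor lhs, rhs, dst;
    lhs.allocator()->init(lhsInfo);
    rhs.allocator()->init(rhsInfo);
    dst.allocator()->init(dstInfo);

    NEMatMul matMul;
    matMul.configure(&lhs, &rhs, &dst, matMulInfo, settings);

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();

    matMul.run(); // executes the dedicated CPU MatMul kernel
    return 0;
}

In Arm NN itself the equivalent conversion and validation happens inside the CpuAcc backend's BatchMatMul workload; the sketch only shows the shape of the ACL-side calls.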
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp    | 42
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp | 20
2 files changed, 43 insertions(+), 19 deletions(-)
diff --git a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
index 905a56d53a..98e75cb8df 100644
--- a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -55,30 +55,38 @@ void BatchMatMulEndToEnd(const std::vector<armnn::BackendId>& backends)
     const TensorShape& inputYShape = { 2, 2, 2 };
     const TensorShape& outputShape = { 2, 2, 2 };
 
-    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+    constexpr float qScale = 1.0f;
+    constexpr int32_t qOffset = 0;
+
+    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape, qScale, qOffset);
 
     CHECK(network);
 
-    std::vector<T> inputXData{ 1, 2,
-                               3, 4,
+    std::vector<float> floatInputXData{ 1., 2.,
+                                        3., 4.,
 
-                               9, 10,
-                               11, 12 };
-    std::vector<T> inputYData{ 5, 7,
-                               6, 8,
+                                        9., 10.,
+                                        11., 12. };
+    std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+    std::vector<float> floatInputYData{ 5., 7.,
+                                        6., 8.,
 
-                               13, 15,
-                               14, 16 };
-    std::vector<T> expectedOutput{ 19, 22,
-                                   43, 50,
+                                        13., 15.,
+                                        14., 16. };
+    std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
+
+    std::vector<float> floatExpectedOutputData{ 19., 22.,
+                                                43., 50.,
 
-                                   267, 286,
-                                   323, 346 };
+                                                267., 286.,
+                                                323., 346. };
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
 
-    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, {1, inputYData}};
-    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+    std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+    std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
 }
 
 } // anonymous namespace
\ No newline at end of file
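The end-to-end test above now routes all reference data through armnnUtils::QuantizedVector (from armnnUtils/QuantizeHelper.hpp). As a rough illustration of what that helper does conceptually, a hypothetical QuantizeToVector is sketched below; the real helper's exact rounding, clamping, and handling of floating-point types may differ.

// Hypothetical stand-in for armnnUtils::QuantizedVector<T>:
// affine quantization q = round(v / scale) + offset, clamped to T's range.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<T> QuantizeToVector(const std::vector<float>& values, float scale, int32_t offset)
{
    // The real helper also passes floating-point T through unchanged; this
    // sketch covers only the integral (quantized) case.
    static_assert(std::is_integral<T>::value, "sketch handles integral T only");

    std::vector<T> out;
    out.reserve(values.size());
    for (float v : values)
    {
        int32_t q = static_cast<int32_t>(std::lround(v / scale)) + offset;
        q = std::max<int32_t>(q, std::numeric_limits<T>::lowest());
        q = std::min<int32_t>(q, std::numeric_limits<T>::max());
        out.push_back(static_cast<T>(q));
    }
    return out;
}

// With qScale = 1.0f and qOffset = 0, as in the test above, int8 reference
// data is unchanged by quantization: { 1, 2, 3, 4, ... } maps to itself,
// which is why the same values work for both float and int8 test runs.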
diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
index 74bd97f103..504ca1d304 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -14,6 +14,7 @@
#include <armnnUtils/QuantizeHelper.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnn/Optional.hpp>
+#include <armnn/BackendHelper.hpp>
template<armnn::DataType ArmnnType, typename T, std::size_t NumDims>
@@ -29,6 +30,7 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     const armnn::TensorInfo& inputYInfo,
     const armnn::TensorInfo& outputInfo)
 {
+    LayerTestResult<T, NumDims> result(outputInfo);
     std::vector<T> outputActual(outputInfo.GetNumElements());
 
     std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
@@ -36,13 +38,27 @@ LayerTestResult<T, NumDims> BatchMatMulTestImpl(
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::BatchMatMulQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Parameters = std::move(descriptor);
     armnn::WorkloadInfo workloadInfo;
 
     AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
     AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
+    // Don't execute if BatchMatMul is not supported, as an exception will be raised.
+    const armnn::BackendId& backend = workloadFactory.GetBackendId();
+    std::string reasonIfUnsupported;
+    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+    result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo,
+                                                       inputYInfo,
+                                                       outputInfo,
+                                                       queueDescriptor.m_Parameters,
+                                                       reasonIfUnsupported);
+    if (!result.m_Supported)
+    {
+        return result;
+    }
+
     auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);
 
     inputXHandle->Allocate();
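The guard added in the hunk above uses Arm NN's public capability-query API from armnn/BackendHelper.hpp, so the test can return early instead of hitting an exception on backends without BatchMatMul support. A standalone sketch of the same pattern follows; the CpuAcc backend id, float32 data type, and 2x2x2 shapes are illustrative choices for the example, not taken from the patch.

// Sketch: query backend support for BatchMatMul before creating a workload.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Illustrative shapes/type; the end-to-end test uses 2x2x2 tensors.
    TensorInfo inputX({ 2, 2, 2 }, DataType::Float32);
    TensorInfo inputY({ 2, 2, 2 }, DataType::Float32);
    TensorInfo output({ 2, 2, 2 }, DataType::Float32);

    BatchMatMulDescriptor descriptor; // defaults: no transpose/adjoint on either input

    std::string reasonIfUnsupported;
    LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuAcc"));
    bool supported = handle.IsBatchMatMulSupported(inputX, inputY, output,
                                                   descriptor, reasonIfUnsupported);
    if (!supported)
    {
        std::cout << "BatchMatMul not supported: " << reasonIfUnsupported << "\n";
        return 1;
    }
    std::cout << "BatchMatMul supported on CpuAcc\n";
    return 0;
}

Checking IsBatchMatMulSupported first mirrors what the patched test does with workloadFactory.GetBackendId(): the same queue-descriptor parameters are validated against the backend before any workload is created.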