diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-11-01 15:59:50 +0000 |
---|---|---|
committer | TeresaARM <teresa.charlinreyes@arm.com> | 2023-05-08 13:16:01 +0000 |
commit | 1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45 (patch) | |
tree | bbb846edda64445c1e033b182e5a079c8d5728d8 /src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp | |
parent | c52190a7e80cf238ba1d8630e5cc36ec7c7849e2 (diff) | |
download | armnn-1fe6c8170ae2fe90b53fb71b7570aec9dfe75c45.tar.gz |
IVGCVSW-7307 Add CpuAcc Batch MatMul Workload
* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst) as per request of ACL
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
Diffstat (limited to 'src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp | 42 |
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
index 905a56d53a..98e75cb8df 100644
--- a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -55,30 +55,38 @@ void BatchMatMulEndToEnd(const std::vector<armnn::BackendId>& backends)
     const TensorShape& inputYShape = { 2, 2, 2 };
     const TensorShape& outputShape = { 2, 2, 2 };
 
-    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+    constexpr float qScale = 1.0f;
+    constexpr int32_t qOffset = 0;
+
+    INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape, qScale, qOffset);
 
     CHECK(network);
 
-    std::vector<T> inputXData{ 1, 2,
-                               3, 4,
+    std::vector<float> floatInputXData{ 1., 2.,
+                                        3., 4.,
+
+                                        9., 10.,
+                                        11., 12. };
+    std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+    std::vector<float> floatInputYData{ 5., 7.,
+                                        6., 8.,
 
-                               9, 10,
-                               11, 12 };
-    std::vector<T> inputYData{ 5, 7,
-                               6, 8,
+                                        13., 15.,
+                                        14., 16. };
+    std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
 
-                               13, 15,
-                               14, 16 };
-    std::vector<T> expectedOutput{ 19, 22,
-                                   43, 50,
+    std::vector<float> floatExpectedOutputData{ 19., 22.,
+                                                43., 50.,
 
-                                   267, 286,
-                                   323, 346 };
+                                                267., 286.,
+                                                323., 346. };
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
 
-    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, {1, inputYData}};
-    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+    std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+    std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
 }
 
 } // anonymous namespace
\ No newline at end of file