From d29d09de2bcee68d0820e6ee7549033b05c6c469 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Mon, 26 Jun 2023 11:52:40 +0100
Subject: Update ACL pin to c952596e70f2fe0073029f053e329a4e930ced8c

* activationInfo passed in directly to configure() rather than part of matMulInfo

Signed-off-by: Nikhil Raj
Change-Id: I546def1c1e1cabaf50629f7d78ae0ba459766ed4
---
 src/backends/cl/workloads/ClBatchMatMulWorkload.cpp     | 8 ++++----
 src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp | 7 +++----
 2 files changed, 7 insertions(+), 8 deletions(-)

(limited to 'src/backends')

diff --git a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
index cfed1b83a1..313c3453a5 100644
--- a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
@@ -50,9 +50,8 @@ arm_compute::Status ClBatchMatMulValidate(const TensorInfo& inputInfoX,
     arm_compute::MatMulInfo matMulInfo;
     matMulInfo.adj_lhs(descriptor.m_TransposeX);
     matMulInfo.adj_rhs(descriptor.m_TransposeY);
-    matMulInfo.fused_activation(activationInfo);
 
-    return arm_compute::CLMatMul::validate(&aclInputInfoX, &aclInputInfoY, &aclOutputInfo, matMulInfo);
+    return arm_compute::CLMatMul::validate(&aclInputInfoX, &aclInputInfoY, &aclOutputInfo, matMulInfo, activationInfo);
 }
 
 ClBatchMatMulWorkload::ClBatchMatMulWorkload(const BatchMatMulQueueDescriptor& descriptor,
@@ -92,9 +91,10 @@ ClBatchMatMulWorkload::ClBatchMatMulWorkload(const BatchMatMulQueueDescriptor& d
     arm_compute::MatMulInfo matMulInfo;
     matMulInfo.adj_lhs(descriptor.m_Parameters.m_TransposeX);
     matMulInfo.adj_rhs(descriptor.m_Parameters.m_TransposeY);
-    matMulInfo.fused_activation(activationInfo);
 
-    m_MatMulLayer.configure(clCompileContext, &inputX, &inputY, &output, matMulInfo);
+    arm_compute::GpuMatMulSettings settings;
+
+    m_MatMulLayer.configure(clCompileContext, &inputX, &inputY, &output, matMulInfo, settings, activationInfo);
 
     // Report Profiling Details
     WorkloadInfo detailsInfo;
diff --git a/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp b/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
index 628e314046..7f2b6eaf99 100644
--- a/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
@@ -46,12 +46,12 @@ arm_compute::Status NeonBatchMatMulValidate(const TensorInfo& inputInfoX,
     arm_compute::MatMulInfo matMulInfo;
     matMulInfo.adj_lhs(descriptor.m_TransposeX);
     matMulInfo.adj_rhs(descriptor.m_TransposeY);
-    matMulInfo.fused_activation(activationInfo);
 
     arm_compute::CpuMatMulSettings settings;
     settings.fast_math(isFastMathEnabled);
 
-    return arm_compute::NEMatMul::validate(&aclInputInfoX, &aclInputInfoY, &aclOutputInfo, matMulInfo, settings);
+    return arm_compute::NEMatMul::validate(&aclInputInfoX, &aclInputInfoY, &aclOutputInfo, matMulInfo, settings,
+                                           activationInfo);
 }
 
 NeonBatchMatMulWorkload::NeonBatchMatMulWorkload(const BatchMatMulQueueDescriptor& descriptor,
@@ -84,12 +84,11 @@ NeonBatchMatMulWorkload::NeonBatchMatMulWorkload(const BatchMatMulQueueDescripto
     arm_compute::MatMulInfo matMulInfo;
     matMulInfo.adj_lhs(descriptor.m_Parameters.m_TransposeX);
    matMulInfo.adj_rhs(descriptor.m_Parameters.m_TransposeY);
-    matMulInfo.fused_activation(activationInfo);
 
     arm_compute::CpuMatMulSettings settings;
     settings.fast_math(isFastMathEnabled);
 
-    m_MatMulLayer.configure(&inputX, &inputY, &output, matMulInfo, settings);
+    m_MatMulLayer.configure(&inputX, &inputY, &output, matMulInfo, settings, activationInfo);
 
     // Report Profiling Details
     WorkloadInfo detailsInfo;
--
cgit v1.2.1
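
Note (not part of the patch): the sketch below illustrates the calling convention this pin update moves to, i.e. the fused ActivationLayerInfo is handed to configure()/validate() as its own argument instead of being stored in MatMulInfo via fused_activation(). The function name, tensors and parameters are hypothetical stand-ins; only the MatMulInfo / CpuMatMulSettings / configure() usage mirrors the Neon hunk above, and it assumes the ACL revision pinned by this commit.

    // Minimal sketch of the post-pin NEMatMul call shape (assumed ACL revision
    // c952596e or later). Tensor allocation and run() are omitted.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEMatMul.h"

    void ConfigureMatMulSketch(arm_compute::ITensor& lhs,      // hypothetical tensors
                               arm_compute::ITensor& rhs,
                               arm_compute::ITensor& dst,
                               bool transposeX,
                               bool transposeY,
                               bool fastMath)
    {
        // Transpose flags still travel in MatMulInfo...
        arm_compute::MatMulInfo matMulInfo;
        matMulInfo.adj_lhs(transposeX);
        matMulInfo.adj_rhs(transposeY);

        // ...but the fused activation is no longer set with
        // matMulInfo.fused_activation(); it goes straight into configure().
        arm_compute::ActivationLayerInfo activationInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::RELU);

        arm_compute::CpuMatMulSettings settings;
        settings.fast_math(fastMath);

        arm_compute::NEMatMul matMulLayer;
        matMulLayer.configure(&lhs, &rhs, &dst, matMulInfo, settings, activationInfo);
    }

The CL path in the patch follows the same pattern, with CLMatMul, a GpuMatMulSettings object and the compile context as the leading configure() argument.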