author      Teresa Charlin <teresa.charlinreyes@arm.com>    2024-02-08 18:46:38 +0000
committer   David Monahan <david.monahan@arm.com>           2024-02-08 20:32:12 +0000
commit      5bda97349eb99151a61ab787a33e9c224ca215be (patch)
tree        1120735b62fdee950f0e07f2ba0c3d08963ed849
parent      20dda37357ac0d02550f4421de6c8bfe44304f90 (diff)
download    armnn-5bda97349eb99151a61ab787a33e9c224ca215be.tar.gz
IVGCVSW-8276 GpuFsa Op: Add MatMul
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib95eb0fd71106e684cb7652917b8de9f0ac73f9c
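For context, the heart of this change is a thin bridge from Arm NN's BatchMatMulDescriptor to the Arm Compute Library dynamic fusion MatMul operator. The standalone sketch below is not part of the patch and the helper name CanFuseMatMul is hypothetical, but every call it makes (GpuWorkloadContext, GpuWorkloadSketch, MatMulAttributes, GpuMatMulSettings, GpuMatMul::validate_op) mirrors the new GpuFsaBatchMatMul.cpp further down.

    // Sketch only: assumes an OpenCL-capable Arm Compute Library build with the
    // experimental dynamic fusion interface, as required by the GpuFsa backend.
    #include <arm_compute/core/CL/CLKernelLibrary.h>
    #include <arm_compute/core/TensorInfo.h>
    #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
    #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
    #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMatMul.h>

    using namespace arm_compute::experimental::dynamic_fusion;

    // Returns true if the dynamic fusion interface accepts this MatMul configuration.
    bool CanFuseMatMul(arm_compute::TensorInfo lhs,
                       arm_compute::TensorInfo rhs,
                       bool transposeX,
                       bool transposeY)
    {
        // A throw-away sketch is enough for validation; nothing is compiled here.
        auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext workloadContext(&compileCtx);
        GpuWorkloadSketch sketch{ &workloadContext };

        arm_compute::ITensorInfo* lhsInfo = workloadContext.create_tensor_info(lhs);
        arm_compute::ITensorInfo* rhsInfo = workloadContext.create_tensor_info(rhs);

        // BatchMatMulDescriptor::m_TransposeX/m_TransposeY become the adjoint flags.
        MatMulAttributes attributes{};
        attributes.adj_lhs(transposeX);
        attributes.adj_rhs(transposeY);

        GpuMatMulSettings settings{};
        settings.m0(1);   // block sizes fixed to 1, matching the patch
        settings.n0(1);
        settings.k0(1);

        arm_compute::Status status =
            GpuMatMul::validate_op(sketch, lhsInfo, rhsInfo, attributes, settings);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }

The GpuFsa backend performs exactly this check in GpuFsaLayerSupport::IsLayerSupported before attempting to fuse the layer during network optimization.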
-rw-r--r--  src/backends/gpuFsa/GpuFsaBackend.cpp                         9
-rw-r--r--  src/backends/gpuFsa/GpuFsaLayerSupport.cpp                   17
-rw-r--r--  src/backends/gpuFsa/layers/CMakeLists.txt                     2
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaBatchMatMul.cpp            106
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaBatchMatMul.hpp             22
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp              9
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp         19
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp     46
8 files changed, 229 insertions(+), 1 deletion(-)
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index de0d01973c..72f8af7b76 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -20,6 +20,7 @@
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
+#include "layers/GpuFsaBatchMatMul.hpp"
#include "layers/GpuFsaCast.hpp"
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
@@ -280,6 +281,14 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
}
break;
}
+ case (LayerType::BatchMatMul):
+ {
+ auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+ auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&base.GetParameters());
+ GpuFsaBatchMatMulCreateOp(preCompiledBlobPtr, input0, input1, *desc);
+ break;
+ }
case (LayerType::DepthwiseConvolution2d):
{
auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 1ee80c964f..85fb03a157 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -10,6 +10,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
+#include "layers/GpuFsaBatchMatMul.hpp"
#include "layers/GpuFsaCast.hpp"
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
@@ -76,6 +77,22 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
switch (type)
{
+ case LayerType::BatchMatMul:
+ {
+ if (infos.size() != 3)
+ {
+ throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
+ "TensorInfos should be of format: {input0, input1 output}.");
+ }
+
+ auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
+
+ FORWARD_LAYER_VALIDATE_FUNC(GpuFsaBatchMatMulValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1],
+ *desc);
+ }
case LayerType::Cast:
{
if (infos.size() != 2)
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 3fe4bdcbc6..37d52e4da1 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -4,6 +4,8 @@
#
list(APPEND armnnGpuFsaBackendLayers_sources
+ GpuFsaBatchMatMul.cpp
+ GpuFsaBatchMatMul.hpp
GpuFsaCast.cpp
GpuFsaCast.hpp
GpuFsaConvolution2d.cpp
diff --git a/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.cpp b/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.cpp
new file mode 100644
index 0000000000..99c899946f
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.cpp
@@ -0,0 +1,106 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaBatchMatMul.hpp"
+#include "UtilsGpuFsa.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMatMul.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaBatchMatMulValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const BatchMatMulDescriptor& descriptor)
+{
+ // Create a new workload sketch, for validation purposes
+ auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+ auto workloadContext = GpuWorkloadContext(&compileCtx);
+ GpuWorkloadSketch sketch{ &workloadContext };
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+ arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+ MatMulAttributes matMulAttributes{};
+ matMulAttributes.adj_lhs(descriptor.m_TransposeX);
+ matMulAttributes.adj_rhs(descriptor.m_TransposeY);
+ GpuMatMulSettings matmulSettings{};
+ matmulSettings.m0(1);
+ matmulSettings.n0(1);
+ matmulSettings.k0(1);
+
+ return GpuMatMul::validate_op(sketch, inputInfo0, inputInfo1, matMulAttributes, matmulSettings);
+}
+
+void GpuFsaBatchMatMulCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const BatchMatMulDescriptor& descriptor)
+{
+ GpuWorkloadSketch* sketch = blob->sketch.get();
+ GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+ std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
+ std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+ MatMulAttributes matMulAttributes{};
+ matMulAttributes.adj_lhs(descriptor.m_TransposeX);
+ matMulAttributes.adj_rhs(descriptor.m_TransposeY);
+ GpuMatMulSettings matmulSettings{};
+ matmulSettings.m0(1);
+ matmulSettings.n0(1);
+ matmulSettings.k0(1);
+
+ // Validate the operator and check its status before adding it to the sketch
+ arm_compute::Status aclStatus = GpuMatMul::validate_op(*sketch,
+ inputTensorInfos[0],
+ inputTensorInfos[1],
+ matMulAttributes,
+ matmulSettings);
+
+ const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+ if (!supported)
+ {
+ throw BackendCapabilityException("\"GpuFsa\" backend failed during BatchMatMul operation validation");
+ }
+
+ arm_compute::ITensorInfo* matMulOutputInfo = GpuMatMul::create_op(*sketch,
+ inputTensorInfos[0],
+ inputTensorInfos[1],
+ matMulAttributes,
+ matmulSettings);
+
+ // Temporary fix until a fusing attempt is made for the GpuFsa backend and an Output layer workload is created.
+ outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+ GpuOutput::create_op(*sketch, matMulOutputInfo, outputTensorInfos[0]);
+
+ // Store the TensorInfos within the blob as unique_ptrs to be used later
+ blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+ blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.hpp b/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.hpp
new file mode 100644
index 0000000000..f7af3a763c
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaBatchMatMul.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+arm_compute::Status GpuFsaBatchMatMulValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const BatchMatMulDescriptor& descriptor);
+
+void GpuFsaBatchMatMulCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const BatchMatMulDescriptor& descriptor);
+
+} // namespace armnn
\ No newline at end of file
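To make the intended call sequence concrete, here is a hypothetical driver (not part of the patch; RecordBatchMatMul is an invented name) for the two entry points declared above. It assumes a GpuFsaPreCompiledBlob whose sketch and workloadContext members have already been initialised, as GpuFsaBackend::OptimizeSubgraphView does before dispatching to GpuFsaBatchMatMulCreateOp in the hunk further up.

    // Hypothetical helper, sketched under the assumptions stated above.
    #include <armnn/Exceptions.hpp>
    #include <gpuFsa/GpuFsaBackend.hpp>
    #include "GpuFsaBatchMatMul.hpp"

    namespace armnn
    {

    void RecordBatchMatMul(GpuFsaPreCompiledBlob* blob,
                           const TensorInfo& input0,
                           const TensorInfo& input1,
                           const BatchMatMulDescriptor& descriptor)
    {
        // Ask the dynamic fusion interface whether this MatMul is supported at all.
        arm_compute::Status status = GpuFsaBatchMatMulValidate(input0, input1, descriptor);
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            throw BackendCapabilityException(status.error_description());
        }

        // Append the MatMul (and its temporary GpuOutput) to the blob's workload sketch;
        // the blob's input/output ITensorInfo lists are populated as a side effect.
        GpuFsaBatchMatMulCreateOp(blob, input0, input1, descriptor);
    }

    } // namespace armnn

Validating first mirrors the backend's own flow, where IsLayerSupported is consulted during optimization before OptimizeSubgraphView records the operator into the pre-compiled blob.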
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index a2708c0e53..7503c4698f 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -5,9 +5,9 @@
#include "backendsCommon/test/EndToEndTestImpl.hpp"
+#include "backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/layerTests/CastTestImpl.hpp"
-
#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
@@ -20,6 +20,13 @@ TEST_SUITE("GpuFsaEndToEnd")
std::vector<BackendId> gpuFsaDefaultBackends = {"GpuFsa"};
+// BatchMatMul
+TEST_CASE("RefBatchMatMulEndToEndFloat32Test")
+{
+ BatchMatMulEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends);
+}
+
+// Cast
TEST_CASE("GpuFsaCastEndtoEndTestFloat32ToFloat16")
{
using namespace half_float::literal;
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 34af1909e6..b6f7f32ea6 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -17,6 +17,25 @@ using namespace armnn;
TEST_SUITE("GpuFsaLayerSupport")
{
+TEST_CASE("IsLayerSupportedGpuFsaBatchMatMul")
+{
+ TensorInfo input0Info({ 2, 2 }, DataType::Float32);
+ TensorInfo input1Info({ 2, 2 }, DataType::Float32);
+ TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
+
+ BatchMatMulDescriptor desc{};
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::BatchMatMul,
+ {input0Info, input1Info, outputInfo},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(supported);
+}
+
TEST_CASE("IsLayerSupportedCast")
{
armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 6ddb942dea..1e5c976c00 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -15,6 +15,52 @@ using namespace armnn;
TEST_SUITE("GpuFsaOptimizedNetwork")
{
+TEST_CASE("BatchMatMulSupportedOptimizedNetwork")
+{
+ using namespace armnn;
+
+ const float qScale = 1.0f;
+ const int32_t qOffset = 0;
+
+ const TensorShape& input1Shape = { 2, 2 };
+ const TensorShape& input2Shape = { 2, 2 };
+ const TensorShape& outputShape = { 2, 2 };
+
+ TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ BatchMatMulDescriptor desc{};
+
+ IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
+ IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
+ IConnectableLayer* batchMatMulLayer = network->AddBatchMatMulLayer(desc, "batchMatMul");
+ IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
+ Connect(input1, batchMatMulLayer, input1TensorInfo, 0, 0);
+ Connect(input2, batchMatMulLayer, input2TensorInfo, 0, 1);
+ Connect(batchMatMulLayer, output, outputTensorInfo, 0, 0);
+
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+
TEST_CASE("CastSupportedOptimizedNetwork")
{
using namespace armnn;