author | Samuel Yap <samuel.yap@arm.com> | 2022-07-06 15:36:03 +0100
---|---|---
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-07-27 15:58:31 +0100
commit | 6b47809e7d6c55d20a05d863ce2f09159f381f85 (patch) |
tree | c33e5820f89e359c80d8773288e8adb075735039 /src/armnn/layers/BatchMatMulLayer.cpp |
parent | 919ec71ea7f44bb2d284eb88cda511c2424358b2 (diff) |
download | armnn-6b47809e7d6c55d20a05d863ce2f09159f381f85.tar.gz |
IVGCVSW-7109: Add Batch MatMul front end support - Reference
* Descriptors added for BatchMatMul
* Layer definition added
* Input validation added (will likely change when opt. param support comes in)
* Ref workload implementation for BatchMatMul added (will also change with opt. param support)
* Ref layer tests made for BatchMatMul
* CMake and other build files updated
Signed-off-by: Samuel Yap <samuel.yap@arm.com>
Change-Id: Ic885301da543ee0fbe7922b85e7f9658c4efc617
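For orientation, here is a minimal sketch of how the new front-end support could be exercised from the public API. It is not part of this patch: `INetwork::AddBatchMatMulLayer` and the default-constructed `BatchMatMulDescriptor` (no transpose or adjoint on either input) are assumed from the rest of the IVGCVSW-7109 series, and the shapes are illustrative only.

```cpp
// Hypothetical usage sketch, not part of this patch.
// Builds a graph that multiplies two 3D tensors with the new BatchMatMul layer.
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::INetworkPtr BuildBatchMatMulGraph()
{
    using namespace armnn;
    INetworkPtr network = INetwork::Create();

    IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
    IConnectableLayer* inputY = network->AddInputLayer(1, "inputY");

    BatchMatMulDescriptor descriptor; // assumed defaults: no transpose/adjoint on either input
    IConnectableLayer* batchMatMul = network->AddBatchMatMulLayer(descriptor, "batchMatMul"); // assumed API
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // X: [2, 3, 4], Y: [2, 4, 5]  ->  expected output shape [2, 3, 5]
    TensorInfo inputXInfo(TensorShape({2, 3, 4}), DataType::Float32);
    TensorInfo inputYInfo(TensorShape({2, 4, 5}), DataType::Float32);
    TensorInfo outputInfo(TensorShape({2, 3, 5}), DataType::Float32);

    inputX->GetOutputSlot(0).SetTensorInfo(inputXInfo);
    inputY->GetOutputSlot(0).SetTensorInfo(inputYInfo);
    batchMatMul->GetOutputSlot(0).SetTensorInfo(outputInfo);

    inputX->GetOutputSlot(0).Connect(batchMatMul->GetInputSlot(0));
    inputY->GetOutputSlot(0).Connect(batchMatMul->GetInputSlot(1));
    batchMatMul->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}
```

With the shapes above, the layer's `InferOutputShapes` in the diff below would also produce [2, 3, 5], matching the broadcast rule it implements.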
Diffstat (limited to 'src/armnn/layers/BatchMatMulLayer.cpp')
-rw-r--r-- | src/armnn/layers/BatchMatMulLayer.cpp | 97 |
1 files changed, 97 insertions, 0 deletions
diff --git a/src/armnn/layers/BatchMatMulLayer.cpp b/src/armnn/layers/BatchMatMulLayer.cpp
new file mode 100644
index 0000000000..501de2d091
--- /dev/null
+++ b/src/armnn/layers/BatchMatMulLayer.cpp
@@ -0,0 +1,97 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "BatchMatMulLayer.hpp"
+
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "layers/LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+BatchMatMulLayer::BatchMatMulLayer(const BatchMatMulDescriptor& param, const char* name)
+    : LayerWithParameters(2, 1, LayerType::BatchMatMul, param, name)
+{}
+
+std::unique_ptr<IWorkload> BatchMatMulLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    BatchMatMulQueueDescriptor descriptor;
+    SetAdditionalInfo(descriptor);
+
+    return factory.CreateWorkload(LayerType::BatchMatMul, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+BatchMatMulLayer* BatchMatMulLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<BatchMatMulLayer>(graph, m_Param, GetName());
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> BatchMatMulLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 2);
+
+    TensorShape inputXShape = inputShapes[0];
+    TensorShape inputYShape = inputShapes[1];
+
+    // Note: Take into account what pre-adjoint or pre-transposing will do to the inferred output shape
+
+    TensorShape& longerInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions()?
+                               inputXShape:inputYShape;
+    TensorShape& shorterInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions()?
+                                inputYShape:inputXShape;
+
+    unsigned int inputNumDimsOffset = longerInput.GetNumDimensions() - shorterInput.GetNumDimensions();
+
+    unsigned int outputNumDimensions = longerInput.GetNumDimensions();
+
+    std::vector<unsigned int> tensorDimensions(outputNumDimensions, 0);
+
+    auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(m_Param, inputXShape, inputYShape);
+    const auto& longerAxesToMul = (axesToMul.first.first >= axesToMul.second.first &&
+                                   axesToMul.first.second >= axesToMul.second.second) ?
+                                  axesToMul.first : axesToMul.second;
+
+    for (unsigned int i = 0; i < outputNumDimensions; ++i)
+    {
+        if (i == longerAxesToMul.first)
+        {
+            tensorDimensions[i] = &shorterInput == &inputXShape ? inputXShape[i - inputNumDimsOffset] : inputXShape[i];
+        }
+        else if(i == longerAxesToMul.second)
+        {
+            tensorDimensions[i] = &shorterInput == &inputYShape ? inputYShape[i - inputNumDimsOffset] : inputYShape[i];
+        }
+        else // The other dimensions not to be multiplied (but may be broadcasted)
+        {
+            // Does NOT validate whether it's a valid broadcast - that's done in the validate func in WorkloadData.cpp
+            tensorDimensions[i] = static_cast<int>(i) - static_cast<int>(inputNumDimsOffset) < 0 ?
+                longerInput[i] :
+                std::max(longerInput[i], shorterInput[i - inputNumDimsOffset]);
+        }
+    }
+
+    auto outputShape = TensorShape(outputNumDimensions, tensorDimensions.data());
+    return std::vector<TensorShape>({ outputShape });
+}
+
+void BatchMatMulLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+    auto inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchMatMulLayer");
+}
+
+} // namespace armnn
\ No newline at end of file
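To make the broadcast rule in `InferOutputShapes` easier to follow in isolation, here is a small self-contained sketch of the same idea, stripped of Arm NN types and descriptor handling. It assumes the default case (no transpose/adjoint, the last two axes are the multiply axes) and, like the layer code, it does not check that the broadcast is actually valid. The function and variable names are illustrative only.

```cpp
// Standalone sketch of the output-shape rule used by BatchMatMulLayer::InferOutputShapes,
// assuming the default descriptor (the last two axes of each input are the multiply axes).
#include <algorithm>
#include <cassert>
#include <vector>

std::vector<unsigned int> InferBatchMatMulShape(std::vector<unsigned int> x,
                                                std::vector<unsigned int> y)
{
    assert(x.size() >= 2 && y.size() >= 2);

    const bool xLonger = x.size() >= y.size();
    const std::vector<unsigned int>& longer  = xLonger ? x : y;
    const std::vector<unsigned int>& shorter = xLonger ? y : x;
    const size_t offset = longer.size() - shorter.size();

    std::vector<unsigned int> out(longer.size(), 0);

    // The last two axes are the matrix axes: rows come from X, columns from Y.
    out[out.size() - 2] = x[x.size() - 2];
    out[out.size() - 1] = y[y.size() - 1];

    // Leading (batch) dimensions broadcast against each other; validity is not checked here.
    for (size_t i = 0; i + 2 < out.size(); ++i)
    {
        out[i] = i < offset ? longer[i] : std::max(longer[i], shorter[i - offset]);
    }
    return out;
}

// Example: X = [2, 3, 4], Y = [4, 5]  ->  output = [2, 3, 5]
```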