ArmNN 24.05
BatchMatMulLayer.cpp
1 //
2 // Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "BatchMatMulLayer.hpp"
6 
7 #include <armnn/backends/WorkloadFactory.hpp>
8 #include <armnnUtils/Permute.hpp>
9 #include "layers/LayerCloneBase.hpp"
10 
11 namespace armnn
12 {
13 
14 BatchMatMulLayer::BatchMatMulLayer(const BatchMatMulDescriptor& param, const char* name)
15  : LayerWithParameters(2, 1, LayerType::BatchMatMul, param, name)
16 {}
17 
18 std::unique_ptr<IWorkload> BatchMatMulLayer::CreateWorkload(const IWorkloadFactory& factory) const
19 {
20  BatchMatMulQueueDescriptor descriptor;
21  SetAdditionalInfo(descriptor);
22 
23  return factory.CreateWorkload(LayerType::BatchMatMul, descriptor, PrepInfoAndDesc(descriptor));
24 }
25 
26 BatchMatMulLayer* BatchMatMulLayer::Clone(Graph& graph) const
27 {
28  auto layer = CloneBase<BatchMatMulLayer>(graph, m_Param, GetName());
29 
30  return std::move(layer);
31 }
32 
33 std::vector<TensorShape> BatchMatMulLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
34 {
35  if (inputShapes.size() != 2)
36  {
37  throw armnn::LayerValidationException("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
38  "\" - should be \"2\".");
39  }
40 
41  TensorShape inputXShape = inputShapes[0];
42  TensorShape inputYShape = inputShapes[1];
43 
44  // Adjoint is assumed to be square, but we will apply the permute anyway
45  if (m_Param.m_TransposeX || m_Param.m_AdjointX)
46  {
47  auto permuteVec = BatchMatMulDescriptor::GetPermuteVec(m_Param.m_DataLayoutX,
48  inputXShape);
49  inputXShape = armnnUtils::Permuted(inputXShape, permuteVec);
50  }
51  if (m_Param.m_TransposeY || m_Param.m_AdjointY)
52  {
53  auto permuteVec = BatchMatMulDescriptor::GetPermuteVec(m_Param.m_DataLayoutY,
54  inputYShape);
55  inputYShape = armnnUtils::Permuted(inputYShape, permuteVec);
56  }
57 
58  TensorShape& longerInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions()?
59  inputXShape : inputYShape;
60  TensorShape& shorterInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions()?
61  inputYShape : inputXShape;
62 
63  unsigned int inputNumDimsOffset = longerInput.GetNumDimensions() - shorterInput.GetNumDimensions();
64 
65  unsigned int outputNumDimensions = longerInput.GetNumDimensions();
66 
67  std::vector<unsigned int> tensorDimensions(outputNumDimensions, 0);
68 
69  const auto& longerInputDataLayout = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions()?
70  m_Param.m_DataLayoutX : m_Param.m_DataLayoutY;
71  auto longerAxesToMul = BatchMatMulDescriptor::GetAxesToMul(longerInputDataLayout,
72  longerInput);
73 
74  for (unsigned int i = 0; i < outputNumDimensions; ++i)
75  {
76  if (i == longerAxesToMul.first)
77  {
78  tensorDimensions[i] = &shorterInput == &inputXShape ? inputXShape[i - inputNumDimsOffset] : inputXShape[i];
79  }
80  else if(i == longerAxesToMul.second)
81  {
82  tensorDimensions[i] = &shorterInput == &inputYShape ? inputYShape[i - inputNumDimsOffset] : inputYShape[i];
83  }
84  else // The other dimensions not to be multiplied (but may be broadcasted)
85  {
86  // Does NOT validate whether it's a valid broadcast - that's done in the validate func in WorkloadData.cpp
87  tensorDimensions[i] = static_cast<int>(i) - static_cast<int>(inputNumDimsOffset) < 0 ?
88  longerInput[i] :
89  std::max(longerInput[i], shorterInput[i - inputNumDimsOffset]);
90  }
91  }
92 
93  auto outputShape = TensorShape(outputNumDimensions, tensorDimensions.data());
94  return std::vector<TensorShape>({ outputShape });
95 }
96 
97 void BatchMatMulLayer::ValidateTensorShapesFromInputs()
98 {
99  VerifyLayerConnections(2, CHECK_LOCATION());
100 
101  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
102 
103  VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
104 
105  auto inferredShapes = InferOutputShapes({
106  GetInputSlot(0).GetTensorInfo().GetShape(),
107  GetInputSlot(1).GetTensorInfo().GetShape() });
108 
109  if (inferredShapes.size() != 1)
110  {
111  throw armnn::LayerValidationException("inferredShapes has "
112  + std::to_string(inferredShapes.size()) +
113  " elements - should only have 1.");
114  }
115 
116  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchMatMulLayer");
117 }
118 
119 } // namespace armnn
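
The shape inference above takes the row dimension of the output from input X, the column dimension from input Y, and broadcasts the remaining leading (batch) dimensions against each other. The standalone sketch below mirrors that rule on plain std::vector shapes purely for illustration: it does not use the ArmNN API, it assumes the default data layout (the two innermost axes are the ones multiplied), and the helper name InferBatchMatMulShape is made up.

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical helper, not part of ArmNN: mirrors the broadcast rule used by
// BatchMatMulLayer::InferOutputShapes once any transpose/adjoint permute has
// already been applied. Shapes are innermost-last, e.g. {2, 3, 4} = 2 batches of 3x4.
std::vector<unsigned int> InferBatchMatMulShape(const std::vector<unsigned int>& x,
                                                const std::vector<unsigned int>& y)
{
    assert(x.size() >= 2 && y.size() >= 2);
    const bool xLonger = x.size() >= y.size();
    const std::vector<unsigned int>& longer  = xLonger ? x : y;
    const std::vector<unsigned int>& shorter = xLonger ? y : x;
    const size_t offset  = longer.size() - shorter.size();
    const size_t rowAxis = longer.size() - 2; // multiplied axes: the two innermost dims
    const size_t colAxis = longer.size() - 1;

    std::vector<unsigned int> out(longer.size(), 0);
    for (size_t i = 0; i < out.size(); ++i)
    {
        if (i == rowAxis)
        {
            out[i] = xLonger ? x[i] : x[i - offset];   // rows come from X
        }
        else if (i == colAxis)
        {
            out[i] = xLonger ? y[i - offset] : y[i];   // columns come from Y
        }
        else
        {
            // Leading (batch) dims broadcast; validity is not checked here, just as
            // InferOutputShapes defers that check to WorkloadData.cpp.
            out[i] = i < offset ? longer[i] : std::max(longer[i], shorter[i - offset]);
        }
    }
    return out;
}

int main()
{
    // X: 2 batches of 3x4 matrices, Y: one 4x5 matrix -> output: 2 batches of 3x5.
    const auto shape = InferBatchMatMulShape({2, 3, 4}, {4, 5});
    assert((shape == std::vector<unsigned int>{2, 3, 5}));
    std::printf("[%u, %u, %u]\n", shape[0], shape[1], shape[2]);
    return 0;
}
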
armnn::BatchMatMulQueueDescriptor
Definition: WorkloadData.hpp:753
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor. Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
Definition: Descriptors.hpp:1612
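For context, a minimal sketch of how this flag is typically set (the member names are the ones listed on this page; the wrapper function is made up for illustration):

#include <armnn/Descriptors.hpp>

// Request a transposed X input only. Per the note above, m_TransposeX and
// m_AdjointX must not both be true for the same input.
armnn::BatchMatMulDescriptor MakeTransposedXDescriptor()
{
    armnn::BatchMatMulDescriptor desc; // default-constructed: no transpose or adjoint
    desc.m_TransposeX = true;
    return desc;
}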
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor. Transpose and Adjoint cannot both be set to true for the same tensor at the same time.
Definition: Descriptors.hpp:1617
armnn::BatchMatMulDescriptor::GetAxesToMul
static std::pair< unsigned int, unsigned int > GetAxesToMul(DataLayout dataLayout, const TensorShape &tensorShape)
Static helper to get the two axes (for each input) for multiplication.
Definition: Descriptors.cpp:485
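As a rough illustration of what this helper is expected to return (assumed behaviour for the default layout, not verified here): for a rank-3 shape the multiplied axes should be the two innermost ones, i.e. {1, 2}.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <utility>

// Expected to yield {1, 2} for a 2x3x4 input under the default (NCHW) layout,
// i.e. the two innermost axes take part in the matrix multiplication.
std::pair<unsigned int, unsigned int> AxesForDefaultLayout()
{
    const armnn::TensorShape shape({2, 3, 4});
    return armnn::BatchMatMulDescriptor::GetAxesToMul(armnn::DataLayout::NCHW, shape);
}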
armnn::BatchMatMulLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shapes will lead to a valid configuration of BatchMatMulLayer.
Definition: BatchMatMulLayer.cpp:97
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1621
BatchMatMulLayer.hpp
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:457
armnn::BatchMatMulDescriptor::GetPermuteVec
static PermutationVector GetPermuteVec(DataLayout dataLayout, const TensorShape &tensorShape)
Static helper to get the axes which will be transposed.
Definition: Descriptors.cpp:523
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1618
armnn::BatchMatMulLayer::Clone
BatchMatMulLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: BatchMatMulLayer.cpp:26
armnnUtils::Permuted
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:125
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
WorkloadFactory.hpp
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:614
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LayerWithParameters< BatchMatMulDescriptor >::m_Param
BatchMatMulDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::LayerWithParameters< BatchMatMulDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1613
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1622
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::BatchMatMulLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shape from the given input shapes.
Definition: BatchMatMulLayer.cpp:33
armnn::BatchMatMulLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the BatchMatMul type.
Definition: BatchMatMulLayer.cpp:18
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:526
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:303
Permute.hpp
armnn::BatchMatMul
Definition: BatchMatMulImpl.hpp:16
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::BatchMatMulLayer
Definition: BatchMatMulLayer.hpp:13
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::BatchMatMulLayer::BatchMatMulLayer
BatchMatMulLayer(const BatchMatMulDescriptor &param, const char *name)
Constructor to create a BatchMatMulLayer.
Definition: BatchMatMulLayer.cpp:14
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:410
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
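This is the backend-side counterpart of BatchMatMulLayer::CreateWorkload above. A rough sketch of the pattern with placeholder names (MakeMyBatchMatMulWorkload and CreateWorkloadSketch stand in for whatever a real backend provides; they are not part of ArmNN):

#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <memory>

// Placeholder declaration standing in for a real backend's workload constructor.
std::unique_ptr<armnn::IWorkload> MakeMyBatchMatMulWorkload(const armnn::BatchMatMulQueueDescriptor& desc,
                                                            const armnn::WorkloadInfo& info);

// Shape of a typical backend factory override: switch on the layer type and
// downcast the generic QueueDescriptor to the layer-specific one.
std::unique_ptr<armnn::IWorkload> CreateWorkloadSketch(armnn::LayerType type,
                                                       const armnn::QueueDescriptor& descriptor,
                                                       const armnn::WorkloadInfo& info)
{
    switch (type)
    {
        case armnn::LayerType::BatchMatMul:
        {
            auto* queueDesc = armnn::PolymorphicDowncast<const armnn::BatchMatMulQueueDescriptor*>(&descriptor);
            return MakeMyBatchMatMulWorkload(*queueDesc, info);
        }
        default:
            return nullptr; // other layer types elided in this sketch
    }
}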
LayerCloneBase.hpp