27 auto layer = CloneBase<BatchMatMulLayer>(graph,
m_Param,
GetName());
29 return std::move(layer);
42 inputXShape:inputYShape;
44 inputYShape:inputXShape;
50 std::vector<unsigned int> tensorDimensions(outputNumDimensions, 0);
53 const auto& longerAxesToMul = (axesToMul.first.first >= axesToMul.second.first &&
54 axesToMul.first.second >= axesToMul.second.second) ?
55 axesToMul.first : axesToMul.second;
57 for (
unsigned int i = 0; i < outputNumDimensions; ++i)
59 if (i == longerAxesToMul.first)
61 tensorDimensions[i] = &shorterInput == &inputXShape ? inputXShape[i - inputNumDimsOffset] : inputXShape[i];
63 else if(i == longerAxesToMul.second)
65 tensorDimensions[i] = &shorterInput == &inputYShape ? inputYShape[i - inputNumDimsOffset] : inputYShape[i];
70 tensorDimensions[i] =
static_cast<int>(i) - static_cast<int>(inputNumDimsOffset) < 0 ?
72 std::max(longerInput[i], shorterInput[i - inputNumDimsOffset]);
76 auto outputShape =
TensorShape(outputNumDimensions, tensorDimensions.data());
77 return std::vector<TensorShape>({ outputShape });
BatchMatMulDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const TensorShape & GetShape() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shape from the given input shapes.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
BatchMatMulLayer(const BatchMatMulDescriptor &param, const char *name)
Constructor to create a BatchMatMulLayer.
static std::pair< std::pair< unsigned int, unsigned int >, std::pair< unsigned int, unsigned int > > GetAxesToMul(const BatchMatMulDescriptor &desc, const TensorShape &tensorXShape, const TensorShape &tensorYShape)
Static helper to get the two axes (for each input) for multiplication.
#define ARMNN_ASSERT(COND)
A BatchMatMulDescriptor for the BatchMatMul operator.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shapes will lead to a valid configuration of BatchMatMulLayer.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the BatchMatMul type.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
const char * GetName() const override
Returns the name of the layer.
BatchMatMulLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...