Diffstat (limited to 'src/armnn/layers')
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp      |  10
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.hpp      |   2
-rw-r--r--  src/armnn/layers/ConstantLayer.cpp                |   6
-rw-r--r--  src/armnn/layers/ConstantLayer.hpp                |   2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp           |  12
-rw-r--r--  src/armnn/layers/Convolution2dLayer.hpp           |   2
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp  |  12
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.hpp  |   2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp    |   7
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.hpp    |   2
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.cpp         |   5
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.hpp         |   2
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp        |   6
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.hpp        |   2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp          |  12
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.hpp          |   2
-rw-r--r--  src/armnn/layers/LayerWithParameters.hpp          |   5
-rw-r--r--  src/armnn/layers/LstmLayer.cpp                    | 146
-rw-r--r--  src/armnn/layers/LstmLayer.hpp                    |   2
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp                 |   6
-rw-r--r--  src/armnn/layers/MemCopyLayer.hpp                 |   2
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp               |   6
-rw-r--r--  src/armnn/layers/MemImportLayer.hpp               |   2
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp             |   6
-rw-r--r--  src/armnn/layers/PreCompiledLayer.hpp             |   2
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp                   | 126
-rw-r--r--  src/armnn/layers/QLstmLayer.hpp                   |   2
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp           |  87
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.hpp           |   2
-rw-r--r--  src/armnn/layers/RankLayer.cpp                    |   5
-rw-r--r--  src/armnn/layers/RankLayer.hpp                    |   4
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp  |  12
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.hpp  |   2
33 files changed, 502 insertions(+), 1 deletion(-)
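
This patch adds an ExecuteStrategy override to every layer under src/armnn/layers, mirroring the existing ILayerVisitor-based Accept path: each layer hands the strategy its descriptor, its constant tensors (weights, biases, LSTM gate parameters), and its name. A minimal consumer is sketched below; the signature matches the IStrategy calls made throughout this patch, but the header location and the trailing LayerBindingId parameter are assumptions, not taken from this diff.

#include <armnn/IStrategy.hpp> // assumed header for armnn::IStrategy
#include <iostream>
#include <vector>

// Minimal sketch: report how many constant tensors each layer hands over.
class TensorCountingStrategy : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        (void)layer; (void)descriptor; (void)id; // not needed for this sketch
        std::cout << name << ": " << constants.size() << " constant tensor(s)\n";
    }
};
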
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index ce351a4376..6df5195a55 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -80,4 +80,14 @@ void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
}
+void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Mean->GetTensorInfo(), m_Mean->Map(true)},
+ {m_Variance->GetTensorInfo(), m_Variance->Map(true)},
+ {m_Beta->GetTensorInfo(), m_Beta->Map(true)},
+ {m_Gamma->GetTensorInfo(), m_Gamma->Map(true)} };
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
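
The override above fixes the tensor order a strategy will see for BatchNormalization: mean, variance, beta, gamma. A hypothetical handler relying on that contract:

#include <armnn/Tensor.hpp>
#include <vector>

// Hypothetical helper; assumes all four tensors are present, in the order
// established by the override above.
void HandleBatchNorm(const std::vector<armnn::ConstTensor>& constants)
{
    const armnn::ConstTensor& mean     = constants[0];
    const armnn::ConstTensor& variance = constants[1];
    const armnn::ConstTensor& beta     = constants[2];
    const armnn::ConstTensor& gamma    = constants[3];
    (void)mean; (void)variance; (void)beta; (void)gamma; // placeholder use
}
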
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 3915897a52..dab75d1e12 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -41,6 +41,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a BatchNormalizationLayer.
/// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 76b9997cfe..31e9e974cf 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -68,4 +68,10 @@ void ConstantLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
}
+void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_LayerOutput->GetTensorInfo(), m_LayerOutput->Map(true)} };
+ strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 36fa1f96e9..9d91551df9 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -41,6 +41,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
protected:
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 18557bf64e..0c3040ea6e 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -157,4 +157,16 @@ void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 4dd1497fd8..440c80dfa9 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -44,6 +44,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
protected:
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index ff9cebafd5..1871b7d15d 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -165,4 +165,16 @@ void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index dd0b0e6b88..7388cbcd8e 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -43,6 +43,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
protected:
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index d54bf26c40..356377a2f5 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -84,4 +84,11 @@ void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
}
+void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Anchors->GetTensorInfo(), m_Anchors->GetConstTensor<void>()} };
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 374eef5ec5..b0d58589b4 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -36,6 +36,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a DetectionPostProcessLayer.
/// @param [in] param DetectionPostProcessDescriptor to configure the detection postprocess.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 631e08c2ac..a169d31b2d 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -82,4 +82,9 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
+void ElementwiseBaseLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 3893dcd9f9..17e8b446e0 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -27,6 +27,8 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// @param numInputSlots The number of input slots for the layer.
/// @param numOutputSlots The number of output slots for the layer.
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index a316b2b82a..102a6725a7 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -52,4 +52,10 @@ void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
+void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ IgnoreUnused(strategy);
+ throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 09bd530f86..78e49e6474 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -30,6 +30,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a FakeQuantizationLayer.
/// @param [in] param FakeQuantizationDescriptor to configure the fake quantization operation.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index ca7a0cc4bb..0e5e5942de 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -101,4 +101,16 @@ void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitFullyConnectedLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index bbacd2551d..4a9cbe1136 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,6 +43,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a FullyConnectedLayer.
/// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index 3f3bdd8050..952eff66ff 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -48,6 +48,11 @@ protected:
/// The parameters for the layer (not including tensor-valued weights etc.).
Parameters m_Param;
+
+ void ExecuteStrategy(IStrategy& strategy) const override
+ {
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+ }
};
} // namespace
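
This default covers every parameterized layer that carries no constant tensors (pooling, softmax, and similar): the strategy receives the layer's descriptor and an empty tensor list, so only layers owning weights or biases need the explicit overrides added elsewhere in this patch. Illustratively, a layer inheriting LayerWithParameters gets this for free:

// Inherited behaviour for a weight-free parameterized layer (illustrative):
//     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
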
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 8e396ab70c..ebc408a636 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -480,4 +480,150 @@ void LstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
}
+void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<ConstTensor> constTensors;
+
+ LstmDescriptor descriptor = GetParameters();
+
+ // First add mandatory/basic parameters
+ if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToForgetWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_InputToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToCellWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToOutputWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToForgetWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToCellWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToOutputWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_ForgetGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+ m_BasicParameters.m_ForgetGateBias->Map(true)));
+ }
+ if (m_BasicParameters.m_CellBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
+ m_BasicParameters.m_CellBias->Map(true)));
+ }
+ if (m_BasicParameters.m_OutputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+ m_BasicParameters.m_OutputGateBias->Map(true)));
+ }
+
+ // Add cifg parameters
+ if (!descriptor.m_CifgEnabled)
+ {
+ if (m_CifgParameters.m_InputToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
+ m_CifgParameters.m_InputToInputWeights->Map(true)));
+ }
+ if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+ m_CifgParameters.m_RecurrentToInputWeights->Map(true)));
+ }
+ if (m_CifgParameters.m_InputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
+ m_CifgParameters.m_InputGateBias->Map(true)));
+ }
+ }
+
+ // Add peephole parameters
+ if (descriptor.m_PeepholeEnabled)
+ {
+ if (!descriptor.m_CifgEnabled)
+ {
+ if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToInputWeights->Map(true)));
+ }
+ }
+ if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToForgetWeights->Map(true)));
+ }
+ if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToOutputWeights->Map(true)));
+ }
+ }
+
+ // Add projection parameters
+ if (descriptor.m_ProjectionEnabled)
+ {
+ if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
+ m_ProjectionParameters.m_ProjectionWeights->Map(true)));
+ }
+ if (m_ProjectionParameters.m_ProjectionBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
+ m_ProjectionParameters.m_ProjectionBias->Map(true)));
+ }
+ }
+
+ // Add norm parameters
+ if (descriptor.m_LayerNormEnabled)
+ {
+ if (!descriptor.m_CifgEnabled)
+ {
+ if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_InputLayerNormWeights->Map(true)));
+ }
+ }
+ if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true)));
+ }
+ if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_CellLayerNormWeights->Map(true)));
+ }
+ if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_OutputLayerNormWeights->Map(true)));
+ }
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
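
Unlike the fixed-order cases above, the LSTM tensors are appended conditionally, so a strategy must interpret the vector through the descriptor rather than by absolute index. A hedged sketch of that dispatch; the downcast from BaseDescriptor and the size check are illustrative assumptions:

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

// Illustrative only: recover the LstmDescriptor before indexing constants.
// Assumes the caller already knows this call came from an LstmLayer.
void InspectLstm(const armnn::BaseDescriptor& descriptor,
                 const std::vector<armnn::ConstTensor>& constants)
{
    const auto& lstmDesc = static_cast<const armnn::LstmDescriptor&>(descriptor);
    // The nine basic weights and biases come first; CIFG, peephole,
    // projection and layer-norm tensors follow only when the matching
    // descriptor flag is enabled, in the order appended above.
    if (!lstmDesc.m_CifgEnabled && constants.size() >= 12)
    {
        const armnn::ConstTensor& inputToInputWeights = constants[9];
        (void)inputToInputWeights;
    }
}
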
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 51348d7015..30f952e276 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -107,6 +107,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a LstmLayer.
/// @param [in] param LstmDescriptor to configure the lstm operation.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index d9a802c23c..40c1b98012 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -55,4 +55,10 @@ void MemCopyLayer::Accept(ILayerVisitor& visitor) const
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
+void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ IgnoreUnused(strategy);
+ throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 996d6872d3..b913c529e5 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -30,6 +30,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a MemCopyLayer.
/// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3d1c702946..c96f92bc5e 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -55,4 +55,10 @@ void MemImportLayer::Accept(ILayerVisitor& visitor) const
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
+void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ IgnoreUnused(strategy);
+ throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 1cbdaac00b..47379701c7 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -30,6 +30,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a MemImportLayer.
/// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index dbbc1fd716..75c1e46a84 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -55,4 +55,10 @@ void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
+void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ IgnoreUnused(strategy);
+ throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index a4851c778f..2ed87578a4 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -35,6 +35,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
private:
PreCompiledLayer(const PreCompiledLayer& other) = delete;
PreCompiledLayer& operator=(const PreCompiledLayer& other) = delete;
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 85f99bddf9..d957bbb485 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -503,4 +503,130 @@ void QLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
}
+
+void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<ConstTensor> constTensors;
+
+ // First add mandatory/basic parameters
+ if (m_BasicParameters.m_InputToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToForgetWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_InputToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToCellWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToCellWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_InputToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(),
+ m_BasicParameters.m_InputToOutputWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToForgetWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToCellWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+ m_BasicParameters.m_RecurrentToOutputWeights->Map(true)));
+ }
+ if (m_BasicParameters.m_ForgetGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_ForgetGateBias->GetTensorInfo(),
+ m_BasicParameters.m_ForgetGateBias->Map(true)));
+ }
+ if (m_BasicParameters.m_CellBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_CellBias->GetTensorInfo(),
+ m_BasicParameters.m_CellBias->Map(true)));
+ }
+ if (m_BasicParameters.m_OutputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_BasicParameters.m_OutputGateBias->GetTensorInfo(),
+ m_BasicParameters.m_OutputGateBias->Map(true)));
+ }
+
+ // Add cifg parameters
+ if (m_CifgParameters.m_InputToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputToInputWeights->GetTensorInfo(),
+ m_CifgParameters.m_InputToInputWeights->Map(true)));
+ }
+ if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+ m_CifgParameters.m_RecurrentToInputWeights->Map(true)));
+ }
+ if (m_CifgParameters.m_InputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_CifgParameters.m_InputGateBias->GetTensorInfo(),
+ m_CifgParameters.m_InputGateBias->Map(true)));
+ }
+
+ // Add peephole parameters
+ if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToInputWeights->Map(true)));
+ }
+ if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToForgetWeights->Map(true)));
+ }
+ if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(),
+ m_PeepholeParameters.m_CellToOutputWeights->Map(true)));
+ }
+
+ // Add projection parameters
+ if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(),
+ m_ProjectionParameters.m_ProjectionWeights->Map(true)));
+ }
+ if (m_ProjectionParameters.m_ProjectionBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(),
+ m_ProjectionParameters.m_ProjectionBias->Map(true)));
+ }
+
+ // Add norm parameters
+ if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_InputLayerNormWeights->Map(true)));
+ }
+ if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_ForgetLayerNormWeights->Map(true)));
+ }
+ if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_CellLayerNormWeights->Map(true)));
+ }
+ if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(),
+ m_LayerNormParameters.m_OutputLayerNormWeights->Map(true)));
+ }
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 5757ef6559..70cc4f2b15 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -109,6 +109,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a QLstmLayer.
/// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 624e443064..578d9eb137 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -291,4 +291,91 @@ void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}
+void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<ConstTensor> constTensors;
+
+ // InputToX weight tensors
+ if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToInputWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToCellWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true)));
+ }
+
+ // RecurrentToX weight tensors
+ if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true)));
+ }
+
+ // Bias tensors
+ if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputGateBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_ForgetGateBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_CellBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_CellBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_OutputGateBias->Map(true)));
+ }
+
+
+ strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index bfe86a4629..544acbd816 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -71,6 +71,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a QuantizedLstmLayer.
/// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 2b0dffe370..3b14ef0d93 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -46,4 +46,9 @@ void RankLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitRankLayer(this, GetName());
}
+void RankLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
+}
+
} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index f4f1ec9e66..fbd2824bb5 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -24,7 +24,9 @@ class RankLayer : public Layer
void Accept(ILayerVisitor& visitor) const override;
- protected:
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+protected:
RankLayer(const char* name);
~RankLayer() = default;
};
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 189e5f6168..bd8cb096e2 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -135,4 +135,16 @@ void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<armnn::ConstTensor> constTensors { {m_Weight->GetTensorInfo(), m_Weight->Map(true)} };
+
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(m_Bias->GetTensorInfo(), m_Bias->Map(true)));
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
} // namespace armnn
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 1ee984d231..903c957393 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -42,6 +42,8 @@ public:
void Accept(ILayerVisitor& visitor) const override;
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
protected:
/// Constructor to create a TransposeConvolution2dLayer.
/// @param [in] param TransposeConvolution2dDescriptor to configure the 2D transpose convolution operation.
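
Finally, a sketch of driving these overrides across a whole network; the INetwork::ExecuteStrategy entry point is assumed here and is not part of this diff:

#include <armnn/INetwork.hpp>

void RunStrategy(const armnn::INetwork& network)
{
    // TensorCountingStrategy is the sketch shown after the diffstat above.
    TensorCountingStrategy strategy;
    network.ExecuteStrategy(strategy); // assumed to call each layer's override
}
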