24 #include <boost/test/unit_test.hpp> 26 using namespace armnn;
31 void CreateLSTMLayerHelper(
Graph &graph,
bool CifgEnabled)
42 unsigned int batchSize = 3;
43 unsigned int inputSize = 2;
44 unsigned int numUnits = 4;
45 unsigned int outputSize = 4;
76 if (!layerDesc.m_CifgEnabled)
89 if (layerDesc.m_ProjectionEnabled)
99 if (layerDesc.m_PeepholeEnabled)
101 if (!layerDesc.m_CifgEnabled)
128 armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
131 Connect(input, layer, lstmTensorInfo1, 0, 0);
132 Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
133 Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
134 Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
135 Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
136 Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
137 Connect(layer, output, lstmTensorInfo3, 3, 0);
150 CreateLSTMLayerHelper(graph,
false);
161 CreateLSTMLayerHelper(graph,
true);
181 ->GetOutputHandler().SetTensorInfo(
info);
190 ->GetOutputHandler().SetTensorInfo(
info);
195 &IsLayerOfType<armnn::InputLayer>,
196 &IsLayerOfType<armnn::InputLayer>,
197 &IsLayerOfType<armnn::MemCopyLayer>,
198 &IsLayerOfType<armnn::FloorLayer>,
199 &IsLayerOfType<armnn::AdditionLayer>,
200 &IsLayerOfType<armnn::OutputLayer>));
203 for (
auto& layer : graph)
213 for (
auto& layer : graph)
223 for (
auto& layer : graph)
245 &IsLayerOfType<armnn::InputLayer>,
246 &IsLayerOfType<armnn::InputLayer>,
247 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
248 &IsLayerOfType<armnn::MemCopyLayer>,
249 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
250 &IsLayerOfType<armnn::FloorLayer>,
251 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
252 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
253 &IsLayerOfType<armnn::AdditionLayer>,
254 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
255 &IsLayerOfType<armnn::OutputLayer>));
259 const unsigned int* weightsShape,
const unsigned int* outputShape,
265 std::vector<float> weightsVector(90);
278 layer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
289 const unsigned int inputShape[] = { 1, 3, 8, 16 };
290 const unsigned int weightsShape[] = { 2, 3, 5, 3 };
291 const unsigned int outputShape[] = { 1, 2, 4, 14 };
300 const unsigned int inputShape[] = { 1, 8, 16, 3 };
301 const unsigned int weightsShape[] = { 2, 5, 3, 3 };
302 const unsigned int outputShape[] = { 1, 4, 14, 2 };
309 const unsigned int* weightsShape,
const unsigned int* outputShape,
315 std::vector<float> weightsVector(18);
328 layer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
339 const unsigned int inputShape[] = { 1, 2, 3, 3 };
340 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
341 const unsigned int outputShape[] = { 1, 2, 1, 1 };
350 const unsigned int inputShape[] = { 1, 3, 3, 2 };
351 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
352 const unsigned int outputShape[] = { 1, 1, 1, 2 };
389 const unsigned int inputShape[] = { 5, 3, 52, 60 };
390 const unsigned int outputShape[] = { 5, 3, 11, 13 };
399 const unsigned int inputShape[] = { 5, 52, 60, 3 };
400 const unsigned int outputShape[] = { 5, 11, 13, 3 };
432 const unsigned int inputShape[] = { 1, 2, 4, 5 };
433 const unsigned int outputShape[] = { 1, 2, 3, 4 };
442 const unsigned int inputShape[] = { 1, 4, 5, 2 };
443 const unsigned int outputShape[] = { 1, 3, 4, 2 };
509 std::vector<uint8_t> anchorsVector(40);
533 input0->GetOutputSlot().Connect(layer->
GetInputSlot(0));
542 const unsigned int inputShape[] = { 1, 2, 2, 3 };
543 const unsigned int paddedShape[] = { 1, 6, 6, 3 };
544 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
545 const unsigned int outputShape[] = { 1, 2, 1, 1 };
554 PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
565 std::vector<float> weightsVector(18);
569 conv2dLayer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
576 padLayer->GetOutputSlot().Connect(conv2dLayer->
GetInputSlot(0));
579 auto checkSimpleConv2d = [ ](
const armnn::Layer*
const layer) ->
bool 583 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
585 (conv2dLayerParams.m_PadLeft == 0) &&
586 (conv2dLayerParams.m_PadRight == 0) &&
587 (conv2dLayerParams.m_PadTop == 0) &&
588 (conv2dLayerParams.m_PadBottom == 0) &&
589 (conv2dLayerParams.m_BiasEnabled ==
false) &&
590 (conv2dLayerParams.m_StrideX == 1) &&
591 (conv2dLayerParams.m_StrideY == 1) &&
597 &IsLayerOfType<armnn::InputLayer>,
598 &IsLayerOfType<armnn::PadLayer>,
600 &IsLayerOfType<armnn::OutputLayer>));
604 auto checkPadFoldedIntoConv2d = [ ](
const armnn::Layer*
const layer) ->
bool 608 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
609 (layer->
GetNameStr() ==
"folded-pad-into-conv2d") &&
610 (conv2dLayerParams.m_PadLeft == 2) &&
611 (conv2dLayerParams.m_PadRight == 2) &&
612 (conv2dLayerParams.m_PadTop == 2) &&
613 (conv2dLayerParams.m_PadBottom == 2) &&
614 (conv2dLayerParams.m_BiasEnabled ==
false) &&
615 (conv2dLayerParams.m_StrideX == 1) &&
616 (conv2dLayerParams.m_StrideY == 1) &&
622 &IsLayerOfType<armnn::InputLayer>,
623 checkPadFoldedIntoConv2d,
624 &IsLayerOfType<armnn::OutputLayer>));
650 template<
typename NamePolicy>
657 static const BackendId& GetIdStatic() {
return NamePolicy::GetIdStatic(); }
658 const BackendId& GetId()
const override {
return GetIdStatic(); }
673 return std::make_shared<MockLayerSupport>();
689 const char* name =
nullptr)
override 692 auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
693 BOOST_TEST((inputLayer->GetBackendId() ==
"MockBackend"));
698 const char* name =
nullptr)
override 701 auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
702 BOOST_TEST((outputLayer->GetBackendId() ==
"MockBackend"));
707 const char* name =
nullptr)
override 710 auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
711 BOOST_TEST((activation->GetBackendId() ==
"CustomBackend"));
735 backendRegistry.Register(
"MockBackend", [](){
736 return std::make_unique<MockBackend<MockPolicy>>();
739 backendRegistry.Register(
"CustomBackend", [](){
740 return std::make_unique<MockBackend<CustomPolicy>>();
748 std::unique_ptr<Graph> graph = std::make_unique<Graph>();
749 auto input = graph->AddLayer<
InputLayer>(0,
"input");
751 auto output = graph->AddLayer<
OutputLayer>(0,
"output");
753 BackendId customBackendId(
"CustomBackend");
761 OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
766 std::vector<BackendId> prefs{
"MockBackend",
"CustomBackend"};
768 BackendIdSet availableBackends = {
"CustomBackend",
"MockBackend"};
782 BOOST_TEST(res.
IsOk());
784 TestBackendAssignment visitor;
785 for (
auto it =firstLayer; it != lastLayer; ++it)
787 (*it)->Accept(visitor);
794 using namespace armnn;
802 const unsigned int inputDimensionSizes[] = {1, 4, 4, 3};
803 const unsigned int weightsDimensionSizes[] = {1, 2, 2, 3};
804 const unsigned int outputDimensionSizes[] = {1, 3, 3, 1};
805 const unsigned int outputChannelSize[] = {outputDimensionSizes[3]};
810 std::vector<float> weightsVector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
813 std::vector<float> betaVector = { 0.1f };
814 std::vector<float> gammaVector = { 0.5f };
815 std::vector<float> meanVector = { 0 };
816 std::vector<float> varianceVector = { 1 };
833 conv->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
834 batchNorm->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
835 batchNorm->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
836 batchNorm->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
837 batchNorm->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
838 if (convolution2dDescriptor.m_BiasEnabled)
840 std::vector<float> biasVector = {11};
842 conv->m_Bias = std::make_unique<ScopedCpuTensorHandle>(bias);
853 &IsLayerOfType<InputLayer>,
854 &IsLayerOfType<Convolution2dLayer>,
855 &IsLayerOfType<BatchNormalizationLayer>,
856 &IsLayerOfType<OutputLayer>));
861 auto checkFusedConv2d = [](
const armnn::Layer*
const layer)->
bool 863 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
864 (layer->
GetNameStr() ==
"fused-batchNorm-into-convolution");
870 &IsLayerOfType<InputLayer>,
872 &IsLayerOfType<OutputLayer>));
898 &IsLayerOfType<armnn::InputLayer>,
899 &IsLayerOfType<armnn::Convolution2dLayer>,
900 &IsLayerOfType<armnn::BatchNormalizationLayer>,
901 &IsLayerOfType<armnn::OutputLayer>,
902 &IsLayerOfType<armnn::OutputLayer>));
909 &IsLayerOfType<armnn::InputLayer>,
910 &IsLayerOfType<armnn::Convolution2dLayer>,
911 &IsLayerOfType<armnn::BatchNormalizationLayer>,
912 &IsLayerOfType<armnn::OutputLayer>,
913 &IsLayerOfType<armnn::OutputLayer>));
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
std::unique_ptr< ScopedCpuTensorHandle > m_ForgetGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LstmBasicParameters m_BasicParameters
This layer represents a batch normalization operation.
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::vector< OptimizationPtr > Optimizations
const Parameters & GetParameters() const
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
float m_ClippingThresProj
Clipping threshold value for the projection.
std::unordered_set< BackendId > BackendIdSet
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
A Convolution2dDescriptor for the Convolution2dLayer.
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
int Connect(InputSlot &destination)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
static void Pass(Graph &graph, const Optimizations &optimizations)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::unique_ptr< ScopedCpuTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a detection postprocess operator.
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
Copyright (c) 2020 ARM Limited.
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
This layer represents a pad operation.
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
LayerList::const_iterator Iterator
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A ResizeDescriptor for the ResizeLayer.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
The SubgraphView class represents a subgraph of a Graph.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum number of detections.
A PadDescriptor for the PadLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a Gather operator.
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo ¶msInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
std::unique_ptr< ScopedCpuTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
const std::string & GetNameStr() const
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
This layer represents a floor operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
Visitor base class with empty implementations.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents a pooling 2d operation.
float m_ClippingThresCell
Clipping threshold value for the cell state.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
LstmOptPeepholeParameters m_PeepholeParameters
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
LstmOptProjectionParameters m_ProjectionParameters
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
BOOST_AUTO_TEST_SUITE_END()
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
std::unique_ptr< ScopedCpuTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
EmptyOptional is used to initialize the Optional class in case we want to have default value for an Optional.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void SetTensorInfo(const TensorInfo &tensorInfo) override
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::unique_ptr< ScopedCpuTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
DataType GetDataType() const
LayerType GetType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
LstmOptCifgParameters m_CifgParameters
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A Pooling2dDescriptor for the Pooling2dLayer.
size_t GetNumLayers() const
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
const TensorInfo & GetTensorInfo() const override
static INetworkPtr Create(NetworkOptions networkOptions={})
static void Destroy(IOptimizedNetwork *network)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
std::unique_ptr< ScopedCpuTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
This layer represents a resize operation.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
std::unique_ptr< IBackendContext > IBackendContextPtr