24 #include <boost/test/unit_test.hpp> 26 using namespace armnn;
31 void CreateLSTMLayerHelper(
Graph &graph,
bool CifgEnabled)
42 unsigned int batchSize = 3;
43 unsigned int inputSize = 2;
44 unsigned int numUnits = 4;
45 unsigned int outputSize = 4;
76 if (!layerDesc.m_CifgEnabled)
89 if (layerDesc.m_ProjectionEnabled)
99 if (layerDesc.m_PeepholeEnabled)
101 if (!layerDesc.m_CifgEnabled)
128 armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
131 Connect(input, layer, lstmTensorInfo1, 0, 0);
132 Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
133 Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
134 Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
135 Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
136 Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
137 Connect(layer, output, lstmTensorInfo3, 3, 0);
150 CreateLSTMLayerHelper(graph,
false);
161 CreateLSTMLayerHelper(graph,
true);
181 ->GetOutputHandler().SetTensorInfo(
info);
190 ->GetOutputHandler().SetTensorInfo(
info);
195 &IsLayerOfType<armnn::InputLayer>,
196 &IsLayerOfType<armnn::InputLayer>,
197 &IsLayerOfType<armnn::MemCopyLayer>,
198 &IsLayerOfType<armnn::FloorLayer>,
199 &IsLayerOfType<armnn::AdditionLayer>,
200 &IsLayerOfType<armnn::OutputLayer>));
203 for (
auto& layer : graph)
213 for (
auto& layer : graph)
223 for (
auto& layer : graph)
245 &IsLayerOfType<armnn::InputLayer>,
246 &IsLayerOfType<armnn::InputLayer>,
247 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
248 &IsLayerOfType<armnn::MemCopyLayer>,
249 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
250 &IsLayerOfType<armnn::FloorLayer>,
251 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
252 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
253 &IsLayerOfType<armnn::AdditionLayer>,
254 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
255 &IsLayerOfType<armnn::OutputLayer>));
261 const unsigned int* weightsShape,
const unsigned int* outputShape,
267 std::vector<float> weightsVector(90);
280 layer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
291 const unsigned int inputShape[] = { 1, 3, 8, 16 };
292 const unsigned int weightsShape[] = { 2, 3, 5, 3 };
293 const unsigned int outputShape[] = { 1, 2, 4, 14 };
302 const unsigned int inputShape[] = { 1, 8, 16, 3 };
303 const unsigned int weightsShape[] = { 2, 5, 3, 3 };
304 const unsigned int outputShape[] = { 1, 4, 14, 2 };
311 const unsigned int* weightsShape,
const unsigned int* outputShape,
317 std::vector<float> weightsVector(18);
330 layer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
341 const unsigned int inputShape[] = { 1, 2, 3, 3 };
342 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
343 const unsigned int outputShape[] = { 1, 2, 1, 1 };
352 const unsigned int inputShape[] = { 1, 3, 3, 2 };
353 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
354 const unsigned int outputShape[] = { 1, 1, 1, 2 };
391 const unsigned int inputShape[] = { 5, 3, 52, 60 };
392 const unsigned int outputShape[] = { 5, 3, 11, 13 };
401 const unsigned int inputShape[] = { 5, 52, 60, 3 };
402 const unsigned int outputShape[] = { 5, 11, 13, 3 };
434 const unsigned int inputShape[] = { 1, 2, 4, 5 };
435 const unsigned int outputShape[] = { 1, 2, 3, 4 };
444 const unsigned int inputShape[] = { 1, 4, 5, 2 };
445 const unsigned int outputShape[] = { 1, 3, 4, 2 };
511 std::vector<uint8_t> anchorsVector(40);
535 input0->GetOutputSlot().Connect(layer->
GetInputSlot(0));
544 const unsigned int inputShape[] = { 1, 2, 2, 3 };
545 const unsigned int paddedShape[] = { 1, 6, 6, 3 };
546 const unsigned int weightsShape[] = { 1, 2, 3, 3 };
547 const unsigned int outputShape[] = { 1, 2, 1, 1 };
557 PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }});
568 std::vector<float> weightsVector(18);
572 conv2dLayer->
m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
579 padLayer->GetOutputSlot().Connect(conv2dLayer->
GetInputSlot(0));
582 auto checkSimpleConv2d = [ ](
const armnn::Layer*
const layer) ->
bool 586 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
588 (conv2dLayerParams.m_PadLeft == 0) &&
589 (conv2dLayerParams.m_PadRight == 0) &&
590 (conv2dLayerParams.m_PadTop == 0) &&
591 (conv2dLayerParams.m_PadBottom == 0) &&
592 (conv2dLayerParams.m_BiasEnabled ==
false) &&
593 (conv2dLayerParams.m_StrideX == 1) &&
594 (conv2dLayerParams.m_StrideY == 1) &&
600 &IsLayerOfType<armnn::InputLayer>,
601 &IsLayerOfType<armnn::PadLayer>,
603 &IsLayerOfType<armnn::OutputLayer>));
607 auto checkPadFoldedIntoConv2d = [ ](
const armnn::Layer*
const layer) ->
bool 611 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
612 (layer->
GetNameStr() ==
"folded-pad-into-conv2d") &&
613 (conv2dLayerParams.m_PadLeft == 2) &&
614 (conv2dLayerParams.m_PadRight == 2) &&
615 (conv2dLayerParams.m_PadTop == 2) &&
616 (conv2dLayerParams.m_PadBottom == 2) &&
617 (conv2dLayerParams.m_BiasEnabled ==
false) &&
618 (conv2dLayerParams.m_StrideX == 1) &&
619 (conv2dLayerParams.m_StrideY == 1) &&
625 &IsLayerOfType<armnn::InputLayer>,
626 checkPadFoldedIntoConv2d,
627 &IsLayerOfType<armnn::OutputLayer>));
656 template<
typename NamePolicy>
663 static const BackendId& GetIdStatic() {
return NamePolicy::GetIdStatic(); }
664 const BackendId& GetId()
const override {
return GetIdStatic(); }
679 return std::make_shared<MockLayerSupport>();
696 const char* name =
nullptr)
override 699 auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
700 BOOST_TEST((inputLayer->GetBackendId() ==
"MockBackend"));
705 const char* name =
nullptr)
override 708 auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
709 BOOST_TEST((outputLayer->GetBackendId() ==
"MockBackend"));
714 const char* name =
nullptr)
override 717 auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
718 BOOST_TEST((activation->GetBackendId() ==
"CustomBackend"));
742 backendRegistry.Register(
"MockBackend", [](){
743 return std::make_unique<MockBackend<MockPolicy>>();
746 backendRegistry.Register(
"CustomBackend", [](){
747 return std::make_unique<MockBackend<CustomPolicy>>();
755 std::unique_ptr<Graph> graph = std::make_unique<Graph>();
756 auto input = graph->AddLayer<
InputLayer>(0,
"input");
758 auto output = graph->AddLayer<
OutputLayer>(0,
"output");
760 BackendId customBackendId(
"CustomBackend");
769 OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
775 std::vector<BackendId> prefs{
"MockBackend",
"CustomBackend"};
777 BackendIdSet availableBackends = {
"CustomBackend",
"MockBackend"};
791 BOOST_TEST(res.
IsOk());
793 TestBackendAssignment visitor;
794 for (
auto it =firstLayer; it != lastLayer; ++it)
796 (*it)->Accept(visitor);
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
std::unique_ptr< ScopedCpuTensorHandle > m_ForgetGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LstmBasicParameters m_BasicParameters
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::vector< OptimizationPtr > Optimizations
const Parameters & GetParameters() const
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
float m_ClippingThresProj
Clipping threshold value for the projection.
std::unordered_set< BackendId > BackendIdSet
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
A Convolution2dDescriptor for the Convolution2dLayer.
void CreateResizeBilinearGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
int Connect(InputSlot &destination)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
static void Pass(Graph &graph, const Optimizations &optimizations)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
std::unique_ptr< ScopedCpuTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a detection postprocess operator.
void CreatePooling2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
Copyright (c) 2020 ARM Limited.
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
This layer represents a pad operation.
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
LayerList::const_iterator Iterator
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ScopedCpuTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A ResizeDescriptor for the ResizeLayer.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
The SubgraphView class represents a subgraph of a Graph.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a Gather operator.
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
void CreateGatherGraph(Graph &graph, const armnn::TensorInfo ¶msInfo, const armnn::TensorInfo &indicesInfo, const armnn::TensorInfo &outputInfo)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
std::unique_ptr< ScopedCpuTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
const std::string & GetNameStr() const
uint32_t m_TargetWidth
Target width value.
bool m_PeepholeEnabled
Enable/disable peephole.
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
This layer represents a floor operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
Visitor base class with empty implementations.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents a pooling 2d operation.
float m_ClippingThresCell
Clipping threshold value for the cell state.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
std::unique_ptr< ScopedCpuTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
LstmOptPeepholeParameters m_PeepholeParameters
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
LstmOptProjectionParameters m_ProjectionParameters
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
BOOST_AUTO_TEST_SUITE_END()
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
std::unique_ptr< ScopedCpuTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional...
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
void CreateConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
void SetTensorInfo(const TensorInfo &tensorInfo) override
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::unique_ptr< ScopedCpuTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
DataType GetDataType() const
LayerType GetType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
std::unique_ptr< ScopedCpuTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
LstmOptCifgParameters m_CifgParameters
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A Pooling2dDescriptor for the Pooling2dLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int *inputShape, const unsigned int *weightsShape, const unsigned int *outputShape, DataLayout dataLayout=DataLayout::NCHW)
const TensorInfo & GetTensorInfo() const override
static void Destroy(IOptimizedNetwork *network)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
static INetworkPtr Create()
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
std::unique_ptr< ScopedCpuTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
This layer represents a resize operation.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
std::unique_ptr< IBackendContext > IBackendContextPtr