296 TEST_CASE(
"LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
301 CreateLSTMLayerHelper(graph,
false);
307 TEST_CASE(
"LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
312 CreateLSTMLayerHelper(graph,
true);
318 TEST_CASE(
"InsertConvertersTest")
332 ->GetOutputHandler().SetTensorInfo(info);
341 ->GetOutputHandler().SetTensorInfo(info);
346 &IsLayerOfType<armnn::InputLayer>,
347 &IsLayerOfType<armnn::InputLayer>,
348 &IsLayerOfType<armnn::MemCopyLayer>,
349 &IsLayerOfType<armnn::FloorLayer>,
350 &IsLayerOfType<armnn::AdditionLayer>,
351 &IsLayerOfType<armnn::OutputLayer>));
354 for (
auto& layer : graph)
364 for (
auto& layer : graph)
374 for (
auto& layer : graph)
396 &IsLayerOfType<armnn::InputLayer>,
397 &IsLayerOfType<armnn::InputLayer>,
398 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
399 &IsLayerOfType<armnn::MemCopyLayer>,
400 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
401 &IsLayerOfType<armnn::FloorLayer>,
402 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
403 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
404 &IsLayerOfType<armnn::AdditionLayer>,
405 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
406 &IsLayerOfType<armnn::OutputLayer>));
409 void CreateConvolution2dGraph(
Graph &graph,
const unsigned int* inputShape,
410 const unsigned int* weightsShape,
const unsigned int* outputShape,
416 std::vector<float> weightsVector(90);
429 layer->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
437 TEST_CASE(
"Conv2dValidateTensorShapesFromInputs")
440 const unsigned int inputShape[] = { 1, 3, 8, 16 };
441 const unsigned int weightsShape[] = { 2, 3, 5, 3 };
442 const unsigned int outputShape[] = { 1, 2, 4, 14 };
443 CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
448 TEST_CASE(
"Conv2dValidateTensorShapesFromInputsNhwc")
451 const unsigned int inputShape[] = { 1, 8, 16, 3 };
452 const unsigned int weightsShape[] = { 2, 5, 3, 3 };
453 const unsigned int outputShape[] = { 1, 4, 14, 2 };
454 CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape,
DataLayout::NHWC);
459 void CreateDepthwiseConvolution2dGraph(
Graph &graph,
const unsigned int* inputShape,
460 const unsigned int* weightsShape,
const unsigned int* outputShape,
466 std::vector<float> weightsVector(18);
479 layer->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
487 TEST_CASE(
"DepthwiseConv2dValidateTensorShapesFromInputs")
490 const unsigned int inputShape[] = { 1, 2, 3, 3 };
491 const unsigned int weightsShape[] = { 1, 3, 3, 2 };
492 const unsigned int outputShape[] = { 1, 2, 1, 1 };
493 CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
498 TEST_CASE(
"DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
501 const unsigned int inputShape[] = { 1, 3, 3, 2 };
502 const unsigned int weightsShape[] = { 1, 3, 3, 2 };
503 const unsigned int outputShape[] = { 1, 1, 1, 2 };
504 CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape,
DataLayout::NHWC);
509 void CreatePooling2dGraph(
Graph& graph,
const unsigned int* inputShape,
const unsigned int* outputShape,
537 TEST_CASE(
"Pooling2dValidateTensorShapesFromInputs")
540 const unsigned int inputShape[] = { 5, 3, 52, 60 };
541 const unsigned int outputShape[] = { 5, 3, 11, 13 };
547 TEST_CASE(
"Pooling2dValidateTensorShapesFromInputsNhwc")
550 const unsigned int inputShape[] = { 5, 52, 60, 3 };
551 const unsigned int outputShape[] = { 5, 11, 13, 3 };
557 void CreateResizeBilinearGraph(
Graph& graph,
558 const unsigned int* inputShape,
559 const unsigned int* outputShape,
582 TEST_CASE(
"ResizeBilinearValidateTensorShapesFromInputs")
585 const unsigned int inputShape[] = { 1, 2, 4, 5 };
586 const unsigned int outputShape[] = { 1, 2, 3, 4 };
587 CreateResizeBilinearGraph(graph, inputShape, outputShape);
592 TEST_CASE(
"ResizeBilinearValidateTensorShapesFromInputsNhwc")
595 const unsigned int inputShape[] = { 1, 4, 5, 2 };
596 const unsigned int outputShape[] = { 1, 3, 4, 2 };
597 CreateResizeBilinearGraph(graph, inputShape, outputShape,
DataLayout::NHWC);
602 void CreateGatherGraph(
Graph& graph,
623 TEST_CASE(
"GatherValidateTensorShapesFromInputs")
630 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
635 TEST_CASE(
"GatherValidateTensorShapesFromInputs1DParams")
642 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
647 TEST_CASE(
"GatherValidateTensorShapesFromInputsMultiDimIndices")
654 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
659 TEST_CASE(
"DetectionPostProcessValidateTensorShapes")
664 std::vector<uint8_t> anchorsVector(40);
682 layer->
m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
688 input0->GetOutputSlot().Connect(layer->
GetInputSlot(0));
694 TEST_CASE(
"BackendCapabilityTest")
708 TEST_CASE(
"BackendHintTest")
716 auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
717 CHECK((inputLayer->GetBackendId() ==
"MockBackend"));
723 auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
724 CHECK((outputLayer->GetBackendId() ==
"MockBackend"));
729 const char* name =
nullptr)
override 732 auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
733 CHECK((activation->GetBackendId() ==
"CustomBackend"));
757 backendRegistry.Register(
"MockBackend", []() {
return std::make_unique<MockBackend<MockPolicy>>(); });
759 backendRegistry.Register(
"CustomBackend", []() {
return std::make_unique<MockBackend<CustomPolicy>>(); });
766 std::unique_ptr<Graph> graph = std::make_unique<Graph>();
767 auto input = graph->AddLayer<
InputLayer>(0,
"input");
769 auto output = graph->AddLayer<
OutputLayer>(0,
"output");
771 BackendId customBackendId(
"CustomBackend");
780 Graph& optGraph = optNet.GetGraph();
782 std::vector<BackendId> prefs{
"MockBackend",
"CustomBackend" };
784 BackendIdSet availableBackends = {
"CustomBackend",
"MockBackend" };
802 TestBackendAssignment visitor;
803 for (
auto it = firstLayer; it != lastLayer; ++it)
805 (*it)->Accept(visitor);
808 backendRegistry.Deregister(
"MockBackend");
809 backendRegistry.Deregister(
"CustomBackend");
813 TEST_CASE(
"OptimizeForExclusiveConnectionsFuseTest")
815 using namespace armnn;
823 const unsigned int inputDimensionSizes[] = { 1, 4, 4, 3 };
824 const unsigned int weightsDimensionSizes[] = { 1, 2, 2, 3 };
825 const unsigned int outputDimensionSizes[] = { 1, 3, 3, 1 };
826 const unsigned int outputChannelSize[] = { outputDimensionSizes[3] };
831 std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
834 std::vector<float> betaVector = { 0.1f };
835 std::vector<float> gammaVector = { 0.5f };
836 std::vector<float> meanVector = { 0 };
837 std::vector<float> varianceVector = { 1 };
854 conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
855 batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
856 batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
857 batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
858 batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
859 if (convolution2dDescriptor.m_BiasEnabled)
861 std::vector<float> biasVector = { 11 };
863 conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
873 &IsLayerOfType<InputLayer>,
874 &IsLayerOfType<Convolution2dLayer>,
875 &IsLayerOfType<BatchNormalizationLayer>,
876 &IsLayerOfType<OutputLayer>));
881 auto checkFusedConv2d = [](
const armnn::Layer*
const layer) ->
bool {
882 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
883 (layer->GetNameStr() ==
"fused-batchNorm-into-convolution");
888 &IsLayerOfType<InputLayer>,
890 &IsLayerOfType<OutputLayer>));
894 TEST_CASE(
"OptimizeForExclusiveConnectionsWithoutFuseTest")
915 &IsLayerOfType<armnn::InputLayer>,
916 &IsLayerOfType<armnn::Convolution2dLayer>,
917 &IsLayerOfType<armnn::BatchNormalizationLayer>,
918 &IsLayerOfType<armnn::OutputLayer>,
919 &IsLayerOfType<armnn::OutputLayer>));
925 &IsLayerOfType<armnn::InputLayer>,
926 &IsLayerOfType<armnn::Convolution2dLayer>,
927 &IsLayerOfType<armnn::BatchNormalizationLayer>,
928 &IsLayerOfType<armnn::OutputLayer>,
929 &IsLayerOfType<armnn::OutputLayer>));
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
OptimizationResult AssignBackends(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
This layer represents a batch normalization operation.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
std::unordered_set< BackendId > BackendIdSet
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability. The BackendCapability must then be in...
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
static void Pass(Graph &graph, const Optimizations &optimizations)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
This layer represents a detection postprocess operator.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerList::const_iterator Iterator
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A ResizeDescriptor for the ResizeLayer.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum numbers of detections.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a Gather operator.
uint32_t m_PadRight
Padding right value in the width dimension.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
An ActivationDescriptor for the ActivationLayer.
This layer represents a floor operation.
uint32_t m_TargetHeight
Target height value.
Visitor base class with empty implementations.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
void SetTensorInfo(const TensorInfo &tensorInfo) override
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
This layer represents a convolution 2d operation.
A Pooling2dDescriptor for the Pooling2dLayer.
size_t GetNumLayers() const
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::shared_ptr< ConstTensorHandle > m_Anchors
A unique pointer to store Anchor values.
static INetworkPtr Create(NetworkOptions networkOptions={})
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
This layer represents a resize operation.