From 87d0bda9b49d9df4455f1887027e5ead2527c27e Mon Sep 17 00:00:00 2001
From: Finn Williams <Finn.Williams@arm.com>
Date: Fri, 3 Jul 2020 10:12:03 +0100
Subject: IVGCVSW-4929 Implement ShapeInferenceMethod in all Layers

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I2c2d99f97cf89814140b057a9f93f41b364197f5
---
 src/armnn/test/FlowControl.cpp         |   7 +-
 src/armnn/test/ShapeInferenceTests.cpp | 694 +++++++++++++++++++++++++++++++++
 2 files changed, 700 insertions(+), 1 deletion(-)
 create mode 100644 src/armnn/test/ShapeInferenceTests.cpp

diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index 3bc993b33b..6198ca8a24 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -28,13 +28,18 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
     std::vector<char> falseData = {0};
     ConstTensor falseTensor(armnn::TensorInfo({1}, armnn::DataType::Boolean), falseData);
-    IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const");
+    IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const");
     constLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
 
     IConnectableLayer* input = net->AddInputLayer(0);
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
 
     IConnectableLayer* switchLayer = net->AddSwitchLayer("switch");
+    switchLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
+    switchLayer->GetOutputSlot(1).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
+
     IConnectableLayer* mergeLayer = net->AddMergeLayer("merge");
+    mergeLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
 
     IConnectableLayer* output = net->AddOutputLayer(0);
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
new file mode 100644
index 0000000000..21df1f0e13
--- /dev/null
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -0,0 +1,694 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+
+#include <armnn/Tensor.hpp>
+#include <Graph.hpp>
+#include <InternalTypes.hpp>
+#include <LayersFwd.hpp>
+#include <layers/FullyConnectedLayer.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(ShapeInferenceTests)
+using namespace armnn;
+namespace
+{
+
+constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
+                                               {true,  false, false, false},
+                                               {false, true,  false, false},
+                                               {false, false, true,  false},
+                                               {false, false, false, true},
+                                               {true,  true,  true,  true}};
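+
+// Each row of maskPermutations above is a dimension-specificity mask for the
+// three-argument TensorShape constructor used below: true marks a dimension
+// whose size is known, false marks one left unspecified. Rows 0-4 cover the
+// cases of zero or exactly one known dimension; row 5 marks all four as known.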
+
+template<typename LayerT, typename... Args>
+LayerT* BuildGraph(Graph* graph, const std::vector<TensorShape>& inputShapes, Args &&... args)
+{
+    auto layer = graph->AddLayer<LayerT>(std::forward<Args>(args)...);
+
+    uint32_t inputCount = 0;
+    for (auto inputShape : inputShapes)
+    {
+        TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+        auto input = graph->AddLayer<InputLayer>(static_cast<LayerBindingId>(inputCount), "input");
+        input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
+        input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
+        inputCount++;
+    }
+
+    return layer;
+}
+
+template<typename LayerT>
+void RunShapeInferenceTest(LayerT* const layer,
+                           const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
+{
+    std::vector<unsigned int> numDimensions;
+    std::vector<TensorShape> expectedOutputShapes;
+
+    for (auto dimensionSizeList : dimensionSizeLists)
+    {
+        numDimensions.emplace_back(dimensionSizeList.size());
+        expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList));
+    }
+
+    const unsigned int outputSize = layer->GetNumOutputSlots();
+
+    const auto runTestWithMask = [&](const bool maskPermutations[], ShapeInferenceMethod shapeInferenceMethod)
+    {
+        for (unsigned int i = 0; i < outputSize; ++i)
+        {
+            layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
+                                                   DataType::Float32});
+        }
+
+        layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
+
+        for (unsigned int i = 0; i < outputSize; ++i)
+        {
+            BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+        }
+    };
+
+    // Test inference with Dimensionality::NotSpecified
+    for (unsigned int j = 0; j < outputSize; ++j)
+    {
+        layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
+    }
+
+    BOOST_CHECK_THROW(
+        layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+
+    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+
+    for (unsigned int i = 0; i < outputSize; ++i)
+    {
+        BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+    }
+
+    // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
+    for (unsigned int i = 0; i <= numDimensions[0]; ++i)
+    {
+        runTestWithMask(maskPermutations[i], ShapeInferenceMethod::InferAndValidate);
+    }
+
+    // maskPermutations[5] equates to all dimensions being known
+    runTestWithMask(maskPermutations[5], ShapeInferenceMethod::ValidateOnly);
+
+    BOOST_CHECK_THROW(
+        runTestWithMask(maskPermutations[5], ShapeInferenceMethod::InferAndValidate), LayerValidationException);
+}
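+
+// Builds a single-layer graph with Float32 inputs of the given shapes and
+// exercises shape inference on it: ValidateOnly must throw while the output
+// shape is still unknown, and InferAndValidate must fill in the expected
+// output shapes listed in dimensionSizeLists.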
+template<typename LayerT, typename... Args>
+void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
+                           const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
+                           Args &&... args)
+{
+    Graph graph;
+
+    auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
+
+    RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
+}
+
+BOOST_AUTO_TEST_CASE(AbsTest)
+{
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::Abs;
+    CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
+}
+
+BOOST_AUTO_TEST_CASE(AdditionTest)
+{
+    CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
+}
+
+BOOST_AUTO_TEST_CASE(ArgMinMaxTest)
+{
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = ArgMinMaxFunction::Min;
+    descriptor.m_Axis = 1;
+
+    CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
+}
+
+BOOST_AUTO_TEST_CASE(BatchNormalizationTest)
+{
+    BatchNormalizationDescriptor descriptor;
+    CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
+}
+
+BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest)
+{
+    BatchToSpaceNdDescriptor descriptor;
+
+    std::vector<unsigned int> blockShape {2, 2};
+    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    descriptor.m_BlockShape = blockShape;
+    descriptor.m_Crops = crops;
+    descriptor.m_DataLayout = DataLayout::NHWC;
+
+    CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
+}
+
+BOOST_AUTO_TEST_CASE(ComparisonTest)
+{
+    ComparisonDescriptor descriptor;
+    descriptor.m_Operation = ComparisonOperation::Equal;
+    CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
+                                           {{ 5, 7, 6, 2 }},
+                                           descriptor,
+                                           "comparison");
+}
+
+BOOST_AUTO_TEST_CASE(ConcatTest)
+{
+    ConcatDescriptor descriptor(2, 3);
+
+    descriptor.SetViewOriginCoord(0, 0, 0);
+    descriptor.SetViewOriginCoord(1, 0, 1);
+
+    CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
+}
+
+BOOST_AUTO_TEST_CASE(ConstantTest)
+{
+    Graph graph;
+    TensorShape outputShape{ 1, 1, 3, 3 };
+    auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
+
+    const float Datum = 0.0f;
+    ConstTensor output0({outputShape, DataType::Float32}, &Datum);
+    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(output0);
+
+    layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
+
+    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+
+    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
+}
+
+BOOST_AUTO_TEST_CASE(ConvertBf16ToFp32Test)
+{
+    CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
+}
+
+BOOST_AUTO_TEST_CASE(ConvertFp32ToBf16Test)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+    CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
+}
+
+BOOST_AUTO_TEST_CASE(ConvertFp16ToFp32Test)
+{
+    CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
+}
+
+BOOST_AUTO_TEST_CASE(ConvertFp32ToFp16Test)
+{
+    CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
+}
+
+BOOST_AUTO_TEST_CASE(Convolution2dTest)
+{
+    const TensorShape inputShape{1, 1, 10, 10};
+
+    Graph graph;
+
+    Convolution2dDescriptor descriptor;
+
+    descriptor.m_PadLeft = 0;
+    descriptor.m_PadTop = 0;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadBottom = 0;
+    descriptor.m_StrideX = 1;
+    descriptor.m_StrideY = 1;
+    descriptor.m_DilationX = 3;
+    descriptor.m_DilationY = 3;
+
+    auto layer = BuildGraph<Convolution2dLayer>(&graph,
+                                                {inputShape},
+                                                descriptor,
+                                                "conv2d");
+
+    const float Datum = 0.0f;
+    ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+
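+    // With a 3x3 kernel at dilation 3, the effective kernel extent is
+    // 3 * (3 - 1) + 1 = 7, so shape inference yields (10 - 7) / 1 + 1 = 4
+    // in each spatial dimension.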
+    RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
+}
+
+BOOST_AUTO_TEST_CASE(DebugLayerTest)
+{
+    const TensorShape tensorShape;
+    CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
+}
+
+BOOST_AUTO_TEST_CASE(DepthToSpaceTest)
+{
+    DepthToSpaceDescriptor descriptor;
+
+    descriptor.m_BlockSize = 2;
+    descriptor.m_DataLayout = DataLayout::NHWC;
+
+    CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8 }}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
+}
+
+BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
+{
+    DepthwiseConvolution2dDescriptor descriptor;
+
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 1;
+    descriptor.m_PadLeft = 0;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_DilationX = 0;
+    descriptor.m_DilationY = 0;
+    descriptor.m_DataLayout = DataLayout::NHWC;
+    descriptor.m_BiasEnabled = false;
+
+    Graph graph;
+
+    auto layer = BuildGraph<DepthwiseConvolution2dLayer>(&graph,
+                                                         {{ 8, 16, 2, 1 }},
+                                                         descriptor,
+                                                         "depthwiseconv2d");
+
+    const float Datum = 0.0f;
+    ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+
+    RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
+}
+
+BOOST_AUTO_TEST_CASE(DequantizeTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+    CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
+}
+
+BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
+{
+    const TensorShape detectionBoxesInfo{ 1, 3, 4 };
+    const TensorShape detectionScoresInfo{ 1, 3, 4 };
+    const TensorShape detectionClassesInfo{ 1, 3, 4 };
+
+    armnn::DetectionPostProcessDescriptor descriptor;
+    descriptor.m_UseRegularNms = true;
+    descriptor.m_MaxDetections = 3;
+    descriptor.m_MaxClassesPerDetection = 1;
+    descriptor.m_DetectionsPerClass = 1;
+    descriptor.m_NmsScoreThreshold = 0.0;
+    descriptor.m_NmsIouThreshold = 0.5;
+    descriptor.m_NumClasses = 2;
+    descriptor.m_ScaleY = 10.0;
+    descriptor.m_ScaleX = 10.0;
+    descriptor.m_ScaleH = 5.0;
+    descriptor.m_ScaleW = 5.0;
+
+    const float Datum = 0.0f;
+    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+
+    Graph graph;
+
+    auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
+                                                       {detectionBoxesInfo, detectionScoresInfo},
+                                                       descriptor,
+                                                       "detectionpostprocess");
+
+    layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchorsTensor);
+
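+    // DetectionPostProcess has four outputs: detection boxes
+    // [1, maxDetections, 4], detection classes and scores [1, maxDetections],
+    // and the number of valid detections [1].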
+    RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
+}
+
+BOOST_AUTO_TEST_CASE(FakeQuantizationTest)
+{
+    FakeQuantizationDescriptor descriptor;
+    descriptor.m_Max = 1;
+    descriptor.m_Min = 1;
+    CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
+}
+
+BOOST_AUTO_TEST_CASE(FloorTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+    CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
+}
+
+BOOST_AUTO_TEST_CASE(FullyConnectedTest)
+{
+    Graph graph;
+
+    const unsigned int inputWidth = 3u;
+    const unsigned int inputHeight = 2u;
+    const unsigned int inputChannels = 1u;
+    const unsigned int outputChannels = 2u;
+
+    auto layer = BuildGraph<FullyConnectedLayer>(&graph,
+                                                 {{1, inputChannels, inputHeight, inputWidth}},
+                                                 FullyConnectedDescriptor(),
+                                                 "fc");
+
+    const float Datum = 0.0f;
+    ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+
+    RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
+}
+
+BOOST_AUTO_TEST_CASE(GatherTest)
+{
+    CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2 }, { 2, 3 }}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
+}
+
+BOOST_AUTO_TEST_CASE(InstanceNormalizationTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+
+    CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
+                                                      InstanceNormalizationDescriptor(),
+                                                      "instancenorm");
+}
+
+BOOST_AUTO_TEST_CASE(L2NormalizationTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+
+    CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
+                                                L2NormalizationDescriptor(),
+                                                "l2norm");
+}
+
+BOOST_AUTO_TEST_CASE(LogSoftMaxTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+
+    CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
+}
+
+BOOST_AUTO_TEST_CASE(LstmTest)
+{
+    const TensorShape inputShape{2, 5};
+    const TensorShape inputCellState{2, 20};
+    const TensorShape expectedOutputShape{2, 20};
+
+    LstmDescriptor descriptor;
+
+    descriptor.m_ActivationFunc = 4;
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_PeepholeEnabled = false;
+    descriptor.m_ProjectionEnabled = false;
+
+    Graph graph;
+    auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
+
+    float Datum = 0.0f;
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+
+    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+
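+    // With CIFG disabled the LSTM layer has four outputs: the scratch buffer
+    // [batch, numUnits * 4] = {2, 80}, followed by outputStateOut,
+    // cellStateOut and output, each [batch, numUnits] = {2, 20}.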
+    RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
+}
+
+BOOST_AUTO_TEST_CASE(MeanLayerTest)
+{
+    MeanDescriptor descriptor;
+    descriptor.m_Axis = {0};
+
+    CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
+}
+
+BOOST_AUTO_TEST_CASE(MemCopyTest)
+{
+    CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
+}
+
+BOOST_AUTO_TEST_CASE(MemImportTest)
+{
+    CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memimport");
+}
+
+BOOST_AUTO_TEST_CASE(MergeTest)
+{
+    const TensorShape tensorShape{ 5, 7, 6, 2 };
+    CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
+}
+
+BOOST_AUTO_TEST_CASE(NormalizationTest)
+{
+    const TensorShape tensorShape{5, 7, 6, 2};
+
+    CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
+}
+
+BOOST_AUTO_TEST_CASE(PermuteTest)
+{
+    PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+    CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
+}
+
+BOOST_AUTO_TEST_CASE(Pooling2dTest)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 4;
+    descriptor.m_PadLeft = descriptor.m_PadRight = 3;
+    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
+}
+
+BOOST_AUTO_TEST_CASE(QLstmTest)
+{
+    const TensorShape inputShape{2, 5};
+    const TensorShape inputCellState{2, 20};
+    const TensorShape expectedOutputShape{2, 20};
+
+    QLstmDescriptor descriptor;
+
+    descriptor.m_CifgEnabled = false;
+    descriptor.m_PeepholeEnabled = false;
+    descriptor.m_ProjectionEnabled = false;
+
+    Graph graph;
+    auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
+
+    float Datum = 0.0f;
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+
+    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+
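+    // QLstm has three outputs, each [batch, numUnits] = {2, 20}:
+    // outputStateOut, cellStateOut and output.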
+    RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
+}
+
+BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
+{
+    const TensorShape inputShape{2, 5};
+    const TensorShape inputCellState{2, 20};
+    const TensorShape expectedOutputShape{2, 20};
+
+    Graph graph;
+    auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quantizedlstm");
+
+    float Datum = 0.0f;
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+
+    layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+    layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+
+    RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeTest)
+{
+    const TensorShape tensorShape { 5, 4, 7, 6 };
+    CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
+}
+
+BOOST_AUTO_TEST_CASE(RankTest)
+{
+    // due to rank having a scalar output we need a custom test
+    const TensorShape expectedOutputs(Dimensionality::Scalar);
+
+    Graph graph;
+    auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }}, "rank");
+
+    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
+
+    BOOST_CHECK_THROW(
+        layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+
+    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+
+    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+
+    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
+
+    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+
+    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+}
+
+BOOST_AUTO_TEST_CASE(ReshapeTest)
+{
+    ReshapeDescriptor descriptor;
+
+    descriptor.m_TargetShape = { 1, 1, 1, 8 };
+
+    CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
+}
+
+BOOST_AUTO_TEST_CASE(ResizeTest)
+{
+    ResizeDescriptor descriptor;
+
+    descriptor.m_TargetHeight = 6;
+    descriptor.m_TargetWidth = 2;
+
+    CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
+}
+
+BOOST_AUTO_TEST_CASE(SliceTest)
+{
+    SliceDescriptor descriptor;
+    descriptor.m_Begin = { 1, 0, 1, 2 };
+    descriptor.m_Size  = { 2, 1, 2, 3 };
+
+    CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
+}
+
+BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest)
+{
+    SpaceToBatchNdDescriptor descriptor;
+
+    std::vector<unsigned int> blockShape {2, 2};
+    std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};
+
+    descriptor.m_BlockShape = blockShape;
+    descriptor.m_PadList = padlist;
+    descriptor.m_DataLayout = DataLayout::NHWC;
+
+    CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
+}
+
+BOOST_AUTO_TEST_CASE(SpaceToDepth)
+{
+    SpaceToDepthDescriptor descriptor;
+
+    descriptor.m_BlockSize = 2;
+    descriptor.m_DataLayout = DataLayout::NHWC;
+
+    CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "spacetodepth");
+}
+
+BOOST_AUTO_TEST_CASE(SplitterTest)
+{
+    SplitterDescriptor descriptor(2, 3);
+
+    descriptor.SetViewSize(0, 0, 1);
+    descriptor.SetViewSize(0, 1, 2);
+    descriptor.SetViewSize(0, 2, 2);
+
+    descriptor.SetViewSize(1, 0, 1);
+    descriptor.SetViewSize(1, 1, 2);
+    descriptor.SetViewSize(1, 2, 2);
+
+    CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
+}
+
+BOOST_AUTO_TEST_CASE(StackTest)
+{
+    StackDescriptor descriptor;
+
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 2;
+    descriptor.m_InputShape = { 3, 2, 3 };
+
+    CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
+}
+
+BOOST_AUTO_TEST_CASE(StridedSliceTest)
+{
+    StridedSliceDescriptor descriptor;
+
+    descriptor.m_Begin  = {0, 0, 0, 0};
+    descriptor.m_End    = {3, 2, 3, 1};
+    descriptor.m_Stride = {2, 2, 2, 1};
+
+    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
+}
+
+BOOST_AUTO_TEST_CASE(SwitchTest)
+{
+    CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
+}
+
+BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest)
+{
+    // Note: this case exercises the StridedSlice configuration rather than
+    // building a TransposeConvolution2dLayer.
+    StridedSliceDescriptor descriptor;
+
+    descriptor.m_Begin  = {0, 0, 0, 0};
+    descriptor.m_End    = {3, 2, 3, 1};
+    descriptor.m_Stride = {2, 2, 2, 1};
+
+    CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
+}
+
+BOOST_AUTO_TEST_CASE(TransposeTest)
+{
+    armnn::TransposeDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
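+    // Transpose interprets m_DimMappings as outputDim[i] = inputDim[mappings[i]]
+    // (the inverse of Permute's convention), so {0U, 3U, 1U, 2U} maps
+    // {1, 2, 2, 3} to {1, 3, 2, 2}.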
+    CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+}
\ No newline at end of file
--
cgit v1.2.1