From a097d2a0ed8e30d5aaf6d29ec18d0c39201b7b67 Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Wed, 24 Nov 2021 15:47:28 +0000 Subject: IVGCVSW-6453 'Move the ArmNN Test Utils code to a physically separate directory' * Created include/armnnTestUtils directory * Moved Arm NN test utils files into armnnTestUtils directory Signed-off-by: Sadik Armagan Change-Id: I03ac54c645c41c52650c4c03b6a58fb1481fef5d --- Android.mk | 10 +- CMakeLists.txt | 39 +- include/armnnTestUtils/DataLayoutUtils.hpp | 60 + include/armnnTestUtils/LayerTestResult.hpp | 63 + include/armnnTestUtils/PredicateResult.hpp | 48 + include/armnnTestUtils/TensorCopyUtils.hpp | 15 + src/armnn/test/CreateWorkload.hpp | 2315 +------------------ src/armnn/test/GraphTests.cpp | 2 +- src/armnn/test/GraphUtils.cpp | 78 - src/armnn/test/GraphUtils.hpp | 24 +- src/armnn/test/InferOutputTests.cpp | 2 +- src/armnn/test/InferOutputTests.hpp | 2 +- src/armnn/test/NetworkTests.cpp | 2 +- src/armnn/test/OptimizerTests.cpp | 2 +- src/armnn/test/PredicateResult.hpp | 45 +- src/armnn/test/RuntimeTests.cpp | 2 +- src/armnn/test/TensorHelpers.hpp | 236 +- src/armnn/test/TestUtils.cpp | 62 - src/armnn/test/TestUtils.hpp | 57 +- src/armnn/test/UnitTests.cpp | 67 - src/armnn/test/UnitTests.hpp | 185 +- .../AddBroadcastReshapeLayerTests.cpp | 4 +- .../optimizations/ConvertConstantsBFloatTests.cpp | 2 +- .../ConvertConstantsFloatToHalfTests.cpp | 2 +- .../ConvertConstantsHalfToFloatTests.cpp | 2 +- src/armnn/test/optimizations/FoldPadTests.cpp | 2 +- .../Fp32NetworkToBf16ConverterTests.cpp | 2 +- .../Fp32NetworkToFp16ConverterTests.cpp | 2 +- .../test/optimizations/FuseActivationTests.cpp | 4 +- .../test/optimizations/FuseBatchNormTests.cpp | 2 +- .../test/optimizations/InsertDebugLayerTests.cpp | 2 +- .../test/optimizations/MovePermuteUpTests.cpp | 2 +- .../test/optimizations/MoveTransposeUpTests.cpp | 2 +- .../OptimizeConsecutiveReshapesTests.cpp | 2 +- .../OptimizeInverseConversionsTests.cpp | 2 +- .../optimizations/OptimizeInversePermutesTests.cpp | 2 +- .../PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp | 2 +- .../test/optimizations/PermuteAsReshapeTests.cpp | 2 +- .../RedirectMembersToConstantInputsTests.cpp | 2 +- .../test/optimizations/ReduceMultipleAxesTests.cpp | 4 +- .../optimizations/SquashEqualSiblingsTests.cpp | 2 +- .../test/optimizations/TransposeAsReshapeTests.cpp | 2 +- .../test/ParserFlatbuffersSerializeFixture.hpp | 2 +- src/armnnTestUtils/CMakeLists.txt | 50 + src/armnnTestUtils/CommonTestUtils.cpp | 70 + src/armnnTestUtils/CommonTestUtils.hpp | 119 + src/armnnTestUtils/CreateWorkload.hpp | 2316 ++++++++++++++++++++ src/armnnTestUtils/DataTypeUtils.hpp | 45 + src/armnnTestUtils/GraphUtils.cpp | 78 + src/armnnTestUtils/GraphUtils.hpp | 25 + src/armnnTestUtils/TensorCopyUtils.cpp | 23 + src/armnnTestUtils/TensorHelpers.hpp | 235 ++ src/armnnTestUtils/TestUtils.cpp | 62 + src/armnnTestUtils/TestUtils.hpp | 58 + src/armnnTestUtils/UnitTests.cpp | 67 + src/armnnTestUtils/UnitTests.hpp | 191 ++ src/armnnTestUtils/WorkloadTestUtils.hpp | 113 + .../test/DetectionPostProcess.cpp | 2 +- .../test/ParserFlatbuffersFixture.hpp | 2 +- src/armnnUtils/ParserPrototxtFixture.hpp | 2 +- src/backends/aclCommon/test/CMakeLists.txt | 1 + .../aclCommon/test/CreateWorkloadClNeon.hpp | 4 +- src/backends/aclCommon/test/MemCopyTestImpl.hpp | 9 +- src/backends/backendsCommon/WorkloadFactory.cpp | 28 +- src/backends/backendsCommon/common.mk | 2 - .../test/ActivationEndToEndTestImpl.hpp | 4 +- .../backendsCommon/test/ActivationFixture.hpp | 6 +- 
.../test/ArgMinMaxEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/BackendProfilingTests.cpp | 2 +- .../test/BatchToSpaceNdEndToEndTestImpl.hpp | 2 +- src/backends/backendsCommon/test/CMakeLists.txt | 7 + .../test/ChannelShuffleEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/CommonTestUtils.cpp | 70 - .../backendsCommon/test/CommonTestUtils.hpp | 121 +- .../test/ComparisonEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/ConcatEndToEndTestImpl.hpp | 2 +- .../test/Convolution3dEndToEndTestImpl.hpp | 4 +- .../backendsCommon/test/DataLayoutUtils.hpp | 59 +- src/backends/backendsCommon/test/DataTypeUtils.hpp | 44 +- .../test/DepthToSpaceEndToEndTestImpl.hpp | 2 +- .../test/DequantizeEndToEndTestImpl.hpp | 2 +- .../test/DetectionPostProcessEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/DynamicBackendTests.cpp | 2 +- .../test/ElementwiseUnaryEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/EndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/FillEndToEndTestImpl.hpp | 2 +- .../test/FullyConnectedEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/GatherEndToEndTestImpl.hpp | 2 +- .../test/InstanceNormalizationEndToEndTestImpl.cpp | 4 +- .../test/LayerReleaseConstantDataTest.cpp | 2 +- .../test/LogSoftmaxEndToEndTestImpl.cpp | 2 +- .../backendsCommon/test/OptimizationViewsTests.cpp | 2 +- .../test/OptimizeSubgraphViewTests.cpp | 2 +- .../backendsCommon/test/OptimizedNetworkTests.cpp | 2 +- .../backendsCommon/test/PreluEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/QLstmEndToEndTestImpl.cpp | 2 +- .../test/QuantizedLstmEndToEndTestImpl.cpp | 4 +- .../backendsCommon/test/RankEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/ResizeEndToEndTestImpl.hpp | 2 +- .../test/SpaceToDepthEndToEndTestImpl.cpp | 4 +- .../test/SplitterEndToEndTestImpl.hpp | 2 +- .../test/StridedSliceAsyncEndToEndTest.hpp | 2 +- .../backendsCommon/test/TensorCopyUtils.cpp | 23 - .../backendsCommon/test/TensorCopyUtils.hpp | 16 +- .../TransposeConvolution2dEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/WorkloadDataValidation.cpp | 2 +- .../backendsCommon/test/WorkloadTestUtils.hpp | 114 +- .../backendsCommon/test/layerTests/AbsTestImpl.hpp | 2 +- .../test/layerTests/ActivationTestImpl.cpp | 6 +- .../test/layerTests/ActivationTestImpl.hpp | 2 +- .../test/layerTests/AdditionTestImpl.hpp | 2 +- .../test/layerTests/ArgMinMaxTestImpl.cpp | 8 +- .../test/layerTests/ArgMinMaxTestImpl.hpp | 2 +- .../test/layerTests/BatchNormalizationTestImpl.cpp | 6 +- .../test/layerTests/BatchNormalizationTestImpl.hpp | 2 +- .../test/layerTests/BatchToSpaceNdTestImpl.hpp | 13 +- .../test/layerTests/CastTestImpl.hpp | 2 +- .../test/layerTests/ChannelShuffleTestImpl.cpp | 6 +- .../test/layerTests/ChannelShuffleTestImpl.hpp | 2 +- .../test/layerTests/ComparisonTestImpl.cpp | 6 +- .../test/layerTests/ComparisonTestImpl.hpp | 2 +- .../test/layerTests/ConcatTestImpl.cpp | 6 +- .../test/layerTests/ConcatTestImpl.hpp | 2 +- .../test/layerTests/ConstantTestImpl.cpp | 6 +- .../test/layerTests/ConstantTestImpl.hpp | 2 +- .../test/layerTests/Conv2dTestImpl.cpp | 8 +- .../test/layerTests/Conv2dTestImpl.hpp | 2 +- .../test/layerTests/Conv3dTestImpl.cpp | 8 +- .../test/layerTests/Conv3dTestImpl.hpp | 2 +- .../test/layerTests/ConvertBf16ToFp32TestImpl.cpp | 6 +- .../test/layerTests/ConvertBf16ToFp32TestImpl.hpp | 2 +- .../test/layerTests/ConvertFp16ToFp32TestImpl.cpp | 6 +- .../test/layerTests/ConvertFp16ToFp32TestImpl.hpp | 2 +- .../test/layerTests/ConvertFp32ToBf16TestImpl.cpp | 6 +- .../test/layerTests/ConvertFp32ToBf16TestImpl.hpp | 2 +- 
.../test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 6 +- .../test/layerTests/ConvertFp32ToFp16TestImpl.hpp | 2 +- .../test/layerTests/DebugTestImpl.cpp | 6 +- .../test/layerTests/DebugTestImpl.hpp | 2 +- .../test/layerTests/DepthToSpaceTestImpl.cpp | 8 +- .../test/layerTests/DepthToSpaceTestImpl.hpp | 2 +- .../test/layerTests/DequantizeTestImpl.cpp | 6 +- .../test/layerTests/DequantizeTestImpl.hpp | 2 +- .../layerTests/DetectionPostProcessTestImpl.hpp | 6 +- .../test/layerTests/DivisionTestImpl.hpp | 2 +- .../test/layerTests/ElementwiseTestImpl.hpp | 10 +- .../test/layerTests/ElementwiseUnaryTestImpl.hpp | 10 +- .../backendsCommon/test/layerTests/ExpTestImpl.hpp | 2 +- .../test/layerTests/FakeQuantizationTestImpl.cpp | 6 +- .../test/layerTests/FakeQuantizationTestImpl.hpp | 2 +- .../test/layerTests/FillTestImpl.cpp | 8 +- .../test/layerTests/FillTestImpl.hpp | 2 +- .../test/layerTests/FloorTestImpl.cpp | 8 +- .../test/layerTests/FloorTestImpl.hpp | 2 +- .../test/layerTests/FullyConnectedTestImpl.cpp | 8 +- .../test/layerTests/FullyConnectedTestImpl.hpp | 2 +- .../test/layerTests/GatherTestImpl.cpp | 6 +- .../test/layerTests/GatherTestImpl.hpp | 2 +- .../layerTests/InstanceNormalizationTestImpl.cpp | 8 +- .../layerTests/InstanceNormalizationTestImpl.hpp | 2 +- .../test/layerTests/L2NormalizationTestImpl.cpp | 6 +- .../test/layerTests/L2NormalizationTestImpl.hpp | 2 +- .../test/layerTests/LayerTestResult.hpp | 61 +- .../test/layerTests/LogSoftmaxTestImpl.cpp | 6 +- .../test/layerTests/LogSoftmaxTestImpl.hpp | 2 +- .../backendsCommon/test/layerTests/LogTestImpl.hpp | 2 +- .../test/layerTests/LogicalTestImpl.cpp | 6 +- .../test/layerTests/LogicalTestImpl.hpp | 2 +- .../test/layerTests/LstmTestImpl.cpp | 6 +- .../test/layerTests/LstmTestImpl.hpp | 2 +- .../test/layerTests/MaximumTestImpl.hpp | 2 +- .../test/layerTests/MeanTestImpl.hpp | 2 +- .../test/layerTests/MinimumTestImpl.hpp | 2 +- .../test/layerTests/MirrorPadTestImpl.cpp | 6 +- .../test/layerTests/MirrorPadTestImpl.hpp | 2 +- .../test/layerTests/MultiplicationTestImpl.hpp | 2 +- .../backendsCommon/test/layerTests/NegTestImpl.hpp | 2 +- .../test/layerTests/NormalizationTestImpl.cpp | 6 +- .../test/layerTests/NormalizationTestImpl.hpp | 2 +- .../backendsCommon/test/layerTests/PadTestImpl.cpp | 6 +- .../backendsCommon/test/layerTests/PadTestImpl.hpp | 2 +- .../test/layerTests/PermuteTestImpl.hpp | 4 +- .../test/layerTests/Pooling2dTestImpl.cpp | 6 +- .../test/layerTests/Pooling2dTestImpl.hpp | 2 +- .../test/layerTests/Pooling3dTestImpl.cpp | 6 +- .../test/layerTests/Pooling3dTestImpl.hpp | 2 +- .../test/layerTests/PreluTestImpl.hpp | 8 +- .../test/layerTests/QuantizeTestImpl.cpp | 6 +- .../test/layerTests/QuantizeTestImpl.hpp | 2 +- .../test/layerTests/RankTestImpl.cpp | 8 +- .../test/layerTests/RankTestImpl.hpp | 2 +- .../test/layerTests/ReduceProdTestImpl.cpp | 8 +- .../test/layerTests/ReduceProdTestImpl.hpp | 2 +- .../test/layerTests/ReduceSumTestImpl.cpp | 8 +- .../test/layerTests/ReduceSumTestImpl.hpp | 2 +- .../test/layerTests/ReductionTestImpl.cpp | 8 +- .../test/layerTests/ReductionTestImpl.hpp | 2 +- .../test/layerTests/ReshapeTestImpl.cpp | 8 +- .../test/layerTests/ReshapeTestImpl.hpp | 2 +- .../test/layerTests/ResizeTestImpl.cpp | 8 +- .../test/layerTests/ResizeTestImpl.hpp | 2 +- .../test/layerTests/RsqrtTestImpl.hpp | 2 +- .../test/layerTests/ShapeTestImpl.cpp | 8 +- .../test/layerTests/ShapeTestImpl.hpp | 2 +- .../backendsCommon/test/layerTests/SinTestImpl.hpp | 2 +- .../test/layerTests/SliceTestImpl.cpp | 6 +- 
.../test/layerTests/SliceTestImpl.hpp | 2 +- .../test/layerTests/SoftmaxTestImpl.cpp | 6 +- .../test/layerTests/SoftmaxTestImpl.hpp | 2 +- .../test/layerTests/SpaceToBatchNdTestImpl.cpp | 6 +- .../test/layerTests/SpaceToBatchNdTestImpl.hpp | 2 +- .../test/layerTests/SpaceToDepthTestImpl.cpp | 6 +- .../test/layerTests/SpaceToDepthTestImpl.hpp | 2 +- .../test/layerTests/SplitterTestImpl.cpp | 6 +- .../test/layerTests/SplitterTestImpl.hpp | 2 +- .../test/layerTests/StackTestImpl.cpp | 8 +- .../test/layerTests/StackTestImpl.hpp | 2 +- .../test/layerTests/StridedSliceTestImpl.cpp | 6 +- .../test/layerTests/StridedSliceTestImpl.hpp | 2 +- .../test/layerTests/SubtractionTestImpl.hpp | 2 +- .../layerTests/TransposeConvolution2dTestImpl.cpp | 8 +- .../layerTests/TransposeConvolution2dTestImpl.hpp | 2 +- .../test/layerTests/TransposeTestImpl.hpp | 4 +- .../UnidirectionalSequenceLstmTestImpl.cpp | 4 +- .../UnidirectionalSequenceLstmTestImpl.hpp | 2 +- src/backends/cl/test/CMakeLists.txt | 1 + src/backends/cl/test/ClCreateWorkloadTests.cpp | 4 +- src/backends/cl/test/ClFallbackTests.cpp | 4 +- src/backends/cl/test/ClLayerSupportTests.cpp | 2 +- src/backends/cl/test/ClLayerTests.cpp | 4 +- src/backends/cl/test/ClOptimizedNetworkTests.cpp | 2 +- src/backends/cl/test/OpenClTimerTest.cpp | 6 +- src/backends/neon/test/CMakeLists.txt | 1 + src/backends/neon/test/NeonFallbackTests.cpp | 4 +- src/backends/neon/test/NeonLayerSupportTests.cpp | 2 +- src/backends/neon/test/NeonLayerTests.cpp | 4 +- src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp | 3 +- src/backends/neon/test/NeonTensorHandleTests.cpp | 4 +- src/backends/neon/test/NeonTimerTest.cpp | 6 +- src/backends/reference/test/CMakeLists.txt | 1 + .../reference/test/RefCreateWorkloadTests.cpp | 2 +- .../reference/test/RefLayerSupportTests.cpp | 3 +- src/backends/reference/test/RefLayerTests.cpp | 2 +- .../reference/test/RefOptimizedNetworkTests.cpp | 2 +- .../workloads/RefChannelShuffleWorkload.cpp | 1 - src/profiling/test/ProfilingTestUtils.cpp | 2 +- 246 files changed, 4105 insertions(+), 3919 deletions(-) create mode 100644 include/armnnTestUtils/DataLayoutUtils.hpp create mode 100644 include/armnnTestUtils/LayerTestResult.hpp create mode 100644 include/armnnTestUtils/PredicateResult.hpp create mode 100644 include/armnnTestUtils/TensorCopyUtils.hpp delete mode 100644 src/armnn/test/GraphUtils.cpp delete mode 100644 src/armnn/test/TestUtils.cpp delete mode 100644 src/armnn/test/UnitTests.cpp create mode 100755 src/armnnTestUtils/CMakeLists.txt create mode 100644 src/armnnTestUtils/CommonTestUtils.cpp create mode 100644 src/armnnTestUtils/CommonTestUtils.hpp create mode 100644 src/armnnTestUtils/CreateWorkload.hpp create mode 100644 src/armnnTestUtils/DataTypeUtils.hpp create mode 100644 src/armnnTestUtils/GraphUtils.cpp create mode 100644 src/armnnTestUtils/GraphUtils.hpp create mode 100644 src/armnnTestUtils/TensorCopyUtils.cpp create mode 100644 src/armnnTestUtils/TensorHelpers.hpp create mode 100644 src/armnnTestUtils/TestUtils.cpp create mode 100644 src/armnnTestUtils/TestUtils.hpp create mode 100644 src/armnnTestUtils/UnitTests.cpp create mode 100644 src/armnnTestUtils/UnitTests.hpp create mode 100644 src/armnnTestUtils/WorkloadTestUtils.hpp delete mode 100644 src/backends/backendsCommon/test/CommonTestUtils.cpp delete mode 100644 src/backends/backendsCommon/test/TensorCopyUtils.cpp diff --git a/Android.mk b/Android.mk index c3cb155731..fc5900be0c 100644 --- a/Android.mk +++ b/Android.mk @@ -16,6 +16,7 @@ ARMNN_THIRD_PARTY_INCLUDE_PATH := 
$(LOCAL_PATH)/third-party ARMNN_MAIN_HEADER_PATH := $(LOCAL_PATH)/src ARMNN_SOURCE_HEADER_PATH := $(LOCAL_PATH)/src/armnn ARMNN_SOURCE_UTILS_HEADER_PATH := $(LOCAL_PATH)/src/armnnUtils +ARMNN_TEST_UTILS_SOURCE_PATH := $(LOCAL_PATH)/src/armnnTestUtils ARMNN_BACKENDS_HEADER_PATH := $(LOCAL_PATH)/src/backends ARMNN_PROFILING_HEADER_PATH := $(LOCAL_PATH)/src/profiling ARMNN_SERIALIZER_HEADER_PATH := $(LOCAL_PATH)/src/armnnSerializer @@ -347,6 +348,7 @@ LOCAL_C_INCLUDES := \ $(ARMNN_MAIN_HEADER_PATH) \ $(ARMNN_SOURCE_HEADER_PATH) \ $(ARMNN_SOURCE_UTILS_HEADER_PATH) \ + $(ARMNN_TEST_UTILS_SOURCE_PATH) \ $(ARMNN_PROFILING_HEADER_PATH) \ $(ARMNN_BACKENDS_HEADER_PATH) \ $(ARMNN_SERIALIZER_HEADER_PATH) \ @@ -382,7 +384,6 @@ LOCAL_SRC_FILES := \ src/armnn/test/FloatingPointConverterTest.cpp \ src/armnn/test/FlowControl.cpp \ src/armnn/test/GraphTests.cpp \ - src/armnn/test/GraphUtils.cpp \ src/armnn/test/InferOutputTests.cpp \ src/armnn/test/InstrumentTests.cpp \ src/armnnUtils/ModelAccuracyChecker.cpp \ @@ -419,12 +420,15 @@ LOCAL_SRC_FILES := \ src/armnn/test/TestLayerVisitor.cpp \ src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp \ src/armnn/test/TestNameOnlyLayerVisitor.cpp \ - src/armnn/test/TestUtils.cpp \ - src/armnn/test/UnitTests.cpp \ src/armnn/test/UtilsTests.cpp \ src/armnnUtils/test/ParserHelperTest.cpp \ src/armnnUtils/test/QuantizeHelperTest.cpp \ src/armnnUtils/test/TensorUtilsTest.cpp \ + src/armnnTestUtils/CommonTestUtils.cpp \ + src/armnnTestUtils/GraphUtils.cpp \ + src/armnnTestUtils/TensorCopyUtils.cpp \ + src/armnnTestUtils/TestUtils.cpp \ + src/armnnTestUtils/UnitTests.cpp \ src/profiling/test/BufferTests.cpp \ src/profiling/test/FileOnlyProfilingDecoratorTests.cpp \ src/profiling/test/PrintPacketHeaderHandler.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index fde058216b..796a829ca5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,6 +54,7 @@ add_subdirectory(samples) add_subdirectory(src/armnnTfLiteParser) add_subdirectory(src/armnnSerializer) add_subdirectory(src/armnnDeserializer) +add_subdirectory(src/armnnTestUtils) if (BUILD_ARMNN_TFLITE_DELEGATE) @@ -116,33 +117,6 @@ list(APPEND armnnUtils_sources add_library_ex(armnnUtils STATIC ${armnnUtils_sources}) target_include_directories(armnnUtils PRIVATE src/backends) -# armnnTestUtils library provides useful test functions for backend developers. 
-set(armnnTestUtils_sources) -list(APPEND armnnTestUtils_sources - src/armnn/test/CreateWorkload.hpp - src/armnn/test/GraphUtils.hpp - src/armnn/test/GraphUtils.cpp - src/armnn/test/PredicateResult.hpp - src/armnn/test/TensorHelpers.hpp - src/armnn/test/TestUtils.hpp - src/armnn/test/TestUtils.cpp - src/armnn/test/UnitTests.hpp - src/backends/backendsCommon/test/CommonTestUtils.hpp - src/backends/backendsCommon/test/CommonTestUtils.cpp - src/backends/backendsCommon/test/DataLayoutUtils.hpp - src/backends/backendsCommon/test/DataTypeUtils.hpp - src/backends/backendsCommon/test/TensorCopyUtils.hpp - src/backends/backendsCommon/test/TensorCopyUtils.cpp - src/backends/backendsCommon/test/WorkloadTestUtils.hpp - src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp - ) - -add_library_ex(armnnTestUtils STATIC ${armnnTestUtils_sources}) -target_include_directories(armnnTestUtils PRIVATE src/armnn) -target_include_directories(armnnTestUtils PRIVATE src/armnnUtils) -target_include_directories(armnnTestUtils PRIVATE src/backends) -target_include_directories(armnnTestUtils PRIVATE src/profiling) - if(BUILD_ONNX_PARSER) set(armnn_onnx_parser_sources) list(APPEND armnn_onnx_parser_sources @@ -552,7 +526,6 @@ target_include_directories(armnn ) target_link_libraries(armnn armnnUtils) -target_link_libraries(armnn armnnTestUtils) # only link pipeCommon if it has been built if(BUILD_TIMELINE_DECODER) target_link_libraries(armnn pipeCommon) @@ -591,11 +564,13 @@ if(BUILD_UNIT_TESTS) src/armnn/test/CloneTests.cpp src/armnn/test/ConstTensorLayerVisitor.hpp src/armnn/test/ConstTensorLayerVisitor.cpp + src/armnn/test/CreateWorkload.hpp src/armnn/test/EndToEndTest.cpp src/armnn/test/ExecutionFrameTest.cpp src/armnn/test/FloatingPointConverterTest.cpp src/armnn/test/FlowControl.cpp src/armnn/test/GraphTests.cpp + src/armnn/test/GraphUtils.hpp src/armnn/test/InstrumentTests.cpp src/armnn/test/InferOutputTests.cpp src/armnn/test/InferOutputTests.hpp @@ -625,11 +600,13 @@ if(BUILD_UNIT_TESTS) src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp src/armnn/test/optimizations/TransposeAsReshapeTests.cpp src/armnn/test/OptionalTest.cpp + src/armnn/test/PredicateResult.hpp src/armnn/test/ProfilerTests.cpp src/armnn/test/ProfilingEventTest.cpp src/armnn/test/ShapeInferenceTests.cpp src/armnn/test/SubgraphViewTests.cpp src/armnn/test/TensorHandleStrategyTest.cpp + src/armnn/test/TensorHelpers.hpp src/armnn/test/TensorTest.cpp src/armnn/test/TestInputOutputLayerVisitor.cpp src/armnn/test/TestInputOutputLayerVisitor.hpp @@ -637,9 +614,10 @@ if(BUILD_UNIT_TESTS) src/armnn/test/TestLayerVisitor.hpp src/armnn/test/TestNameOnlyLayerVisitor.cpp src/armnn/test/TestNameOnlyLayerVisitor.hpp + src/armnn/test/TestUtils.hpp + src/armnn/test/UnitTests.hpp src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp - src/armnn/test/UnitTests.cpp src/armnn/test/UtilityTests.cpp src/armnn/test/UtilsTests.cpp src/armnnUtils/test/FloatingPointComparisonTest.cpp @@ -866,6 +844,7 @@ if(BUILD_UNIT_TESTS) add_executable(UnitTests ${unittest_sources}) target_include_directories(UnitTests PRIVATE src/armnn) target_include_directories(UnitTests PRIVATE src/armnnUtils) + target_include_directories(UnitTests PRIVATE src/armnnTestUtils) target_include_directories(UnitTests PRIVATE src/backends) target_include_directories(UnitTests PRIVATE src/profiling) @@ -1017,7 +996,6 @@ set(armnn_export_targets) list(APPEND armnn_export_targets armnn armnnUtils - armnnTestUtils ) install( @@ -1073,7 
+1051,6 @@ export(
 add_library(Armnn::Armnn ALIAS armnn)
 add_library(Armnn::armnnUtils ALIAS armnnUtils)
-add_library(Armnn::armnnTestUtils ALIAS armnnTestUtils)
 ####################################################
 ## Build Python bindings
diff --git a/include/armnnTestUtils/DataLayoutUtils.hpp b/include/armnnTestUtils/DataLayoutUtils.hpp
new file mode 100644
index 0000000000..fde6f172cc
--- /dev/null
+++ b/include/armnnTestUtils/DataLayoutUtils.hpp
@@ -0,0 +1,60 @@
+//
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+#include <armnnUtils/Permute.hpp>
+
+template<typename T>
+void PermuteTensorNchwToNhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, nchwToNhwc);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), nchwToNhwc, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNhwcToNchw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector nhwcToNchw = { 0, 2, 3, 1 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, nhwcToNchw);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), nhwcToNchw, tensorData.data(), tmp.data(), sizeof(T));
+
+    tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNdhwcToNcdhw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector ndhwcToNcdhw = { 0, 2, 3, 4, 1 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, ndhwcToNcdhw);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), ndhwcToNcdhw, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNcdhwToNdhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector ncdhwToNdhwc = { 0, 4, 1, 2, 3 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, ncdhwToNdhwc);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), ncdhwToNdhwc, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
diff --git a/include/armnnTestUtils/LayerTestResult.hpp b/include/armnnTestUtils/LayerTestResult.hpp
new file mode 100644
index 0000000000..410973e4b1
--- /dev/null
+++ b/include/armnnTestUtils/LayerTestResult.hpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
+
+#include <cstddef>
+#include <vector>
+
+template <typename T, std::size_t n>
+struct LayerTestResult
+{
+    LayerTestResult(const armnn::TensorInfo& outputInfo)
+        : m_Supported(true)
+        , m_CompareBoolean(false)
+    {
+        m_ActualData.reserve(outputInfo.GetNumElements());
+        m_ExpectedData.reserve(outputInfo.GetNumElements());
+        m_ActualShape = outputInfo.GetShape();
+        m_ExpectedShape = outputInfo.GetShape();
+    }
+
+    LayerTestResult(const std::vector<T>& actualData,
+                    const std::vector<T>& expectedData,
+                    const armnn::TensorShape& actualShape,
+                    const armnn::TensorShape& expectedShape)
+        : m_ActualData(actualData)
+        , m_ExpectedData(expectedData)
+        , m_ActualShape(actualShape)
+        , m_ExpectedShape(expectedShape)
+        , m_Supported(true)
+        , m_CompareBoolean(false)
+    {}
+
+    LayerTestResult(const std::vector<T>& actualData,
+                    const std::vector<T>& expectedData,
+                    const armnn::TensorShape& actualShape,
+                    const armnn::TensorShape& expectedShape,
+                    const bool compareBoolean)
+        : m_ActualData(actualData)
+        , m_ExpectedData(expectedData)
+        , m_ActualShape(actualShape)
+        , m_ExpectedShape(expectedShape)
+        , m_Supported(true)
+        , m_CompareBoolean(compareBoolean)
+    {}
+
+    std::vector<T> m_ActualData;
+    std::vector<T> m_ExpectedData;
+    armnn::TensorShape m_ActualShape;
+    armnn::TensorShape m_ExpectedShape;
+
+    bool m_Supported;
+    bool m_CompareBoolean;
+};
+
+
+
+
diff --git a/include/armnnTestUtils/PredicateResult.hpp b/include/armnnTestUtils/PredicateResult.hpp
new file mode 100644
index 0000000000..a344c8e3ad
--- /dev/null
+++ b/include/armnnTestUtils/PredicateResult.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <sstream>
+
+namespace armnn
+{
+
+class PredicateResult
+{
+public:
+    explicit PredicateResult(bool result)
+        : m_Result(result)
+    {}
+
+    PredicateResult(const PredicateResult& predicateResult)
+        : m_Result(predicateResult.m_Result)
+        , m_Message(predicateResult.m_Message.str())
+    {}
+
+    void SetResult(bool newResult)
+    {
+        m_Result = newResult;
+    }
+
+    std::stringstream& Message()
+    {
+        return m_Message;
+    }
+
+    bool operator!() const
+    {
+        return !m_Result;
+    }
+
+    void operator=(PredicateResult otherPredicateResult)
+    {
+        m_Result = otherPredicateResult.m_Result;
+    }
+
+    bool m_Result;
+    std::stringstream m_Message;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/include/armnnTestUtils/TensorCopyUtils.hpp b/include/armnnTestUtils/TensorCopyUtils.hpp
new file mode 100644
index 0000000000..ae6072e46e
--- /dev/null
+++ b/include/armnnTestUtils/TensorCopyUtils.hpp
@@ -0,0 +1,15 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/ITensorHandle.hpp>
+
+#include <armnn/Tensor.hpp>
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
+
+void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle);
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
\ No newline at end of file
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index ea8a436177..ae07253841 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -2,2315 +2,8 @@
 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT // -#pragma once -#include "TestUtils.hpp" - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include - -using namespace armnn; - -namespace -{ - -using namespace std; - -// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type. -template -std::unique_ptr MakeAndCheckWorkload(Layer& layer, - const IWorkloadFactory& factory, - const ModelOptions& modelOptions = {}) -{ - std::unique_ptr workload = layer.CreateWorkload(factory); - CHECK_MESSAGE(workload.get() == PolymorphicDowncast(workload.get()), - "Cannot convert to derived class"); - std::string reasonIfUnsupported; - layer.SetBackendId(factory.GetBackendId()); - CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions)); - return std::unique_ptr(static_cast(workload.release())); -} - -// Helper function to create tensor handlers for workloads, assuming they all use the same factory. -void CreateTensorHandles(armnn::Graph& graph, - armnn::IWorkloadFactory& factory) -{ - TensorHandleFactoryRegistry tmpRegistry; - for (auto&& layer : graph.TopologicalSort()) - { - layer->CreateTensorHandles(tmpRegistry, factory); - } -} - -///////////////////////////////////////////////////////////////////////////////////////////// -// The following functions are called by backendsCommon/test/CreateWorkload*.cpp -// They build very simple graphs, and then create a workload. -// Some checks are performed on the workload to ensure parameters have been passed correctly. -// They return the created workloads so that backend-specific checks can be performed. -///////////////////////////////////////////////////////////////////////////////////////////// - -template -std::unique_ptr CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - ActivationDescriptor layerDesc; - layerDesc.m_Function = ActivationFunction::Abs; - layerDesc.m_A = 3.5f; - layerDesc.m_B = -10.0f; - - ActivationLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo({1, 1}, DataType); - - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - ActivationQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK(queueDescriptor.m_Parameters.m_A == 3.5f); - CHECK(queueDescriptor.m_Parameters.m_B == -10.0f); - CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs)); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateElementwiseWorkloadTest(armnn::IWorkloadFactory & factory, - armnn::Graph & graph) -{ - // Creates the layer we're testing. - Layer* const layer = graph.AddLayer("layer"); - - // Creates extra layers. - Layer* const input1 = graph.AddLayer(1, "input1"); - Layer* const input2 = graph.AddLayer(2, "input2"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. 
- armnn::TensorInfo tensorInfo({2, 3}, DataType); - Connect(input1, layer, tensorInfo, 0, 0); - Connect(input2, layer, tensorInfo, 0, 1); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - DescriptorType queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - SubtractionLayer* const layer = graph.AddLayer("layer"); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Creates extra layers. - Layer* const input1 = graph.AddLayer(1, "input1"); - Layer* const input2 = graph.AddLayer(2, "input2"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo({2, 3}, DataType); - Connect(input1, layer, tensorInfo, 0, 0); - Connect(input2, layer, tensorInfo, 0, 1); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Check that the additional information can be queried from the layer - std::shared_ptr - activationDescPtr = layer->GetAdditionalInformation(); - - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - DescriptorType queueDescriptor = workload->GetData(); - - const ActivationDescriptor* queueDescBlobPtr = - queueDescriptor.template GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - return workload; -} - -template -std::unique_ptr CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - MultiplicationLayer* const layer = graph.AddLayer("layer"); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Creates extra layers. - Layer* const input1 = graph.AddLayer(1, "input1"); - Layer* const input2 = graph.AddLayer(2, "input2"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. 
- armnn::TensorInfo tensorInfo({2, 3}, DataType); - Connect(input1, layer, tensorInfo, 0, 0); - Connect(input2, layer, tensorInfo, 0, 1); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Check that the additional information can be queried from the layer - std::shared_ptr - activationDescPtr = layer->GetAdditionalInformation(); - - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - DescriptorType queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - const ActivationDescriptor* queueDescBlobPtr = - queueDescriptor.template GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - return workload;// Returns so we can do extra, backend-specific tests. -} - -template -std::unique_ptr CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - AdditionLayer* const layer = graph.AddLayer("layer"); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Creates extra layers. - Layer* const input1 = graph.AddLayer(1, "input1"); - Layer* const input2 = graph.AddLayer(2, "input2"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo({2, 3}, DataType); - Connect(input1, layer, tensorInfo, 0, 0); - Connect(input2, layer, tensorInfo, 0, 1); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Check that the additional information can be queried from the layer - std::shared_ptr - activationDescPtr = layer->template GetAdditionalInformation(); - - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - // Makes the workload and checks it. 
- auto workload = MakeAndCheckWorkload(*layer, factory); - - DescriptorType queueDescriptor = workload->GetData(); - const ActivationDescriptor* queueDescBlobPtr = - queueDescriptor.template GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - return workload; -} - -template -std::unique_ptr CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory, - armnn::Graph & graph, - armnn::UnaryOperation op) -{ - ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op); - Layer* const layer = graph.AddLayer(desc, "layer"); - - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - armnn::TensorInfo tensorInfo({ 2, 3 }, DataType); - Connect(input, layer, tensorInfo, 0, 0); - Connect(layer, output, tensorInfo, 0, 0); - CreateTensorHandles(graph, factory); - - auto workload = MakeAndCheckWorkload(*layer, factory); - DescriptorType queueDescriptor = workload->GetData(); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - return workload; -} - -template -std::unique_ptr CreateBatchNormalizationWorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) -{ - TensorShape tensorShape; - switch (dataLayout) - { - case DataLayout::NHWC: - tensorShape = { 2, 4, 4, 3 }; - break; - case DataLayout::NCHW: - default: - tensorShape = { 2, 3, 4, 4 }; - } - - // Creates the layer we're testing. - BatchNormalizationDescriptor layerDesc; - layerDesc.m_Eps = 0.05f; - layerDesc.m_DataLayout = dataLayout; - - BatchNormalizationLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - armnn::TensorInfo weightInfo({3}, DataType); - layer->m_Mean = std::make_unique(weightInfo); - layer->m_Variance = std::make_unique(weightInfo); - layer->m_Beta = std::make_unique(weightInfo); - layer->m_Gamma = std::make_unique(weightInfo); - layer->m_Mean->Allocate(); - layer->m_Variance->Allocate(); - layer->m_Beta->Allocate(); - layer->m_Gamma->Allocate(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo(tensorShape, DataType); - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - // Returns so we can do extra, backend-specific tests. 
- return workload; -} - -template -std::unique_ptr CreateBatchNormalizationWithBlobWorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) -{ - TensorShape tensorShape; - switch (dataLayout) - { - case DataLayout::NHWC: - tensorShape = { 2, 4, 4, 3 }; - break; - case DataLayout::NCHW: - default: - tensorShape = { 2, 3, 4, 4 }; - } - - // Creates the layer we're testing. - BatchNormalizationDescriptor layerDesc; - layerDesc.m_Eps = 0.05f; - layerDesc.m_DataLayout = dataLayout; - - BatchNormalizationLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - armnn::TensorInfo weightInfo({3}, DataType); - layer->m_Mean = std::make_unique(weightInfo); - layer->m_Variance = std::make_unique(weightInfo); - layer->m_Beta = std::make_unique(weightInfo); - layer->m_Gamma = std::make_unique(weightInfo); - layer->m_Mean->Allocate(); - layer->m_Variance->Allocate(); - layer->m_Beta->Allocate(); - layer->m_Gamma->Allocate(); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Check that the additional information can be queried from the layer - std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo(tensorShape, DataType); - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType))); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW, - const ModelOptions& modelOptions = {}) -{ - // Creates the layer we're testing. 
- Convolution2dDescriptor layerDesc; - layerDesc.m_PadLeft = 3; - layerDesc.m_PadRight = 3; - layerDesc.m_PadTop = 1; - layerDesc.m_PadBottom = 1; - layerDesc.m_StrideX = 2; - layerDesc.m_StrideY = 4; - layerDesc.m_BiasEnabled = true; - layerDesc.m_DataLayout = dataLayout; - - Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3}; - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; - - layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); - layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - Connect(input, layer, TensorInfo(inputShape, DataType)); - Connect(layer, output, TensorInfo(outputShape, DataType)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); - - Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 4); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 3); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); - CHECK((queueDescriptor.m_Bias->GetTensorInfo() == - TensorInfo({2}, GetBiasDataType(DataType)))); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConvolution2dFusedActivationWithBlobWorkloadTest( - armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW, - const ModelOptions& modelOptions = {}) -{ - // Creates the layer we're testing. - Convolution2dDescriptor layerDesc; - layerDesc.m_PadLeft = 3; - layerDesc.m_PadRight = 3; - layerDesc.m_PadTop = 1; - layerDesc.m_PadBottom = 1; - layerDesc.m_StrideX = 2; - layerDesc.m_StrideY = 4; - layerDesc.m_BiasEnabled = true; - layerDesc.m_DataLayout = dataLayout; - - - Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3}; - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? 
TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; - - layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); - layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Check that the additional information can be queried from the layer - std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); - - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - Connect(input, layer, TensorInfo(inputShape, DataType)); - Connect(layer, output, TensorInfo(outputShape, DataType)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); - - Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 4); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 3); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); - CHECK((queueDescriptor.m_Bias->GetTensorInfo() == - TensorInfo({2}, GetBiasDataType(DataType)))); - CHECK(queueDescriptor.m_Inputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW, - const ModelOptions& modelOptions = {}) -{ - // Creates the layer we're testing. 
- Convolution2dDescriptor layerDesc; - layerDesc.m_PadLeft = 0; - layerDesc.m_PadRight = 0; - layerDesc.m_PadTop = 0; - layerDesc.m_PadBottom = 0; - layerDesc.m_StrideX = 1; - layerDesc.m_StrideY = 1; - layerDesc.m_BiasEnabled = false; - layerDesc.m_DataLayout = dataLayout; - - Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - TensorShape weightShape = TensorShape{32, 32, 3, 3}; - TensorShape inputShape = TensorShape{1, 32, 149, 149}; - TensorShape outputShape = TensorShape{1, 32, 147, 147}; - - layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); - layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - Connect(input, layer, TensorInfo(inputShape, DataType)); - Connect(layer, output, TensorInfo(outputShape, DataType)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); - - Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 0); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 0); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); - - // Returns so we can do extra, backend-specific tests. 
- return workload; -} - -template -std::unique_ptr CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) -{ - // This parameter setting is for withCifgWithPeepholeNoProjection - LstmDescriptor layerDesc; - layerDesc.m_ActivationFunc = 4; - layerDesc.m_ClippingThresCell = 0.0f; - layerDesc.m_ClippingThresProj = 0.0f; - layerDesc.m_CifgEnabled = true; - layerDesc.m_PeepholeEnabled = true; - layerDesc.m_ProjectionEnabled = false; - - LstmLayer* const layer = graph.AddLayer(layerDesc, "layer"); - unsigned int batchSize = 2; - unsigned int inputSize = 2; - unsigned int numUnits = 4; - unsigned int outputSize = 4; - - layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique - (TensorInfo({ numUnits, inputSize }, DataType::Float32)); - layer->m_BasicParameters.m_InputToCellWeights = std::make_unique - (TensorInfo({ numUnits, inputSize }, DataType::Float32)); - layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique - (TensorInfo({ numUnits, inputSize }, DataType::Float32)); - layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique - (TensorInfo({ numUnits, outputSize }, DataType::Float32)); - layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique - (TensorInfo({ numUnits, outputSize }, DataType::Float32)); - layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique - (TensorInfo({ numUnits, outputSize }, DataType::Float32)); - layer->m_BasicParameters.m_ForgetGateBias = std::make_unique - (TensorInfo({ numUnits }, DataType::Float32)); - layer->m_BasicParameters.m_CellBias = std::make_unique - (TensorInfo({ numUnits }, DataType::Float32)); - layer->m_BasicParameters.m_OutputGateBias = std::make_unique - (TensorInfo({ numUnits }, DataType::Float32)); - - layer->m_BasicParameters.m_InputToForgetWeights->Allocate(); - layer->m_BasicParameters.m_InputToCellWeights->Allocate(); - layer->m_BasicParameters.m_InputToOutputWeights->Allocate(); - layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate(); - layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate(); - layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate(); - layer->m_BasicParameters.m_ForgetGateBias->Allocate(); - layer->m_BasicParameters.m_CellBias->Allocate(); - layer->m_BasicParameters.m_OutputGateBias->Allocate(); - - - if (layerDesc.m_PeepholeEnabled) - { - layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique - (TensorInfo({ numUnits }, DataType::Float32)); - layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique - (TensorInfo({ numUnits }, DataType::Float32)); - layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate(); - layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate(); - } - - // create input and output layers - Layer* const input = graph.AddLayer(0, "input"); - Layer* const outputStateIn = graph.AddLayer(1, "outputStateIn"); - Layer* const cellStateIn = graph.AddLayer(2, "cellStateIn"); - Layer* const scratchBuffer = graph.AddLayer(0, "scratchBuffer"); - Layer* const outputStateOut = graph.AddLayer(1, "outputStateOut"); - Layer* const cellStateOut = graph.AddLayer(2, "cellStateOut"); - Layer* const output = graph.AddLayer(3, "output"); - - // connect up - armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32); - armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32); - armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32); - armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 
(layerDesc.m_CifgEnabled ? 3 : 4) }, - DataType::Float32); - Connect(input, layer, lstmTensorInfo1, 0, 0); - Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1); - Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2); - Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0); - Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0); - Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0); - Connect(layer, output, lstmTensorInfo3, 3, 0); - - CreateTensorHandles(graph, factory); - - // make the workload and check it - auto workload = MakeAndCheckWorkload(*layer, factory); - LstmQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4); - CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f); - CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f); - CHECK(queueDescriptor.m_Inputs.size() == 3); - CHECK(queueDescriptor.m_Outputs.size() == 4); - - CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize }, - DataType::Float32))); - CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits }, - DataType::Float32))); - CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32))); - return workload; -} - -template -std::unique_ptr CreateQuantizedLstmWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - auto layer = graph.AddLayer("quantizedLstmlayer"); - unsigned int numBatches = 2; - unsigned int inputSize = 2; - unsigned int outputSize = 4; - - // Scale/Offset for input/output, cellState In/Out, weights, bias - float inputOutputScale = 0.0078125f; - int32_t inputOutputOffset = 128; - - float cellStateScale = 0.00048828125f; - int32_t cellStateOffset = 0; - - float weightsScale = 0.00408021f; - int32_t weightsOffset = 100; - - float biasScale = 3.1876640625e-05f; - int32_t biasOffset = 0; - - // Weights and bias tensor and quantization info - armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, - armnn::DataType::QAsymmU8, - weightsScale, - weightsOffset); - - armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, - armnn::DataType::QAsymmU8, - weightsScale, - weightsOffset); - - armnn::TensorInfo biasInfo({outputSize}, - armnn::DataType::Signed32, - biasScale, - biasOffset); - - // Weights and bias - layer->m_QuantizedLstmParameters.m_InputToInputWeights = - std::make_unique(inputWeightsInfo); - layer->m_QuantizedLstmParameters.m_InputToForgetWeights = - std::make_unique(inputWeightsInfo); - layer->m_QuantizedLstmParameters.m_InputToCellWeights = - std::make_unique(inputWeightsInfo); - layer->m_QuantizedLstmParameters.m_InputToOutputWeights = - std::make_unique(inputWeightsInfo); - - layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = - std::make_unique(recurrentWeightsInfo); - layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = - std::make_unique(recurrentWeightsInfo); - layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = - std::make_unique(recurrentWeightsInfo); - layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = - std::make_unique(recurrentWeightsInfo); - - layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique(biasInfo); - layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique(biasInfo); - layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique(biasInfo); - layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique(biasInfo); - - // Allocate weights and bias - 
layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_InputToForgetWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_InputToCellWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_InputToOutputWeights->Allocate(); - - layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->Allocate(); - layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Allocate(); - - layer->m_QuantizedLstmParameters.m_InputGateBias->Allocate(); - layer->m_QuantizedLstmParameters.m_ForgetGateBias->Allocate(); - layer->m_QuantizedLstmParameters.m_CellBias->Allocate(); - layer->m_QuantizedLstmParameters.m_OutputGateBias->Allocate(); - - // Create input and output layers - Layer* const input = graph.AddLayer(0, "input"); - Layer* const cellStateIn = graph.AddLayer(1, "cellStateIn"); - Layer* const outputStateIn = graph.AddLayer(2, "outputStateIn"); - - Layer* const cellStateOut = graph.AddLayer(0, "cellStateOut"); - Layer* const outputStateOut = graph.AddLayer(1, "outputStateOut"); - - // Input/output tensor info and quantization info - armnn::TensorInfo inputInfo({numBatches , inputSize}, - armnn::DataType::QAsymmU8, - inputOutputScale, - inputOutputOffset); - - armnn::TensorInfo cellStateInfo({numBatches , outputSize}, - armnn::DataType::QSymmS16, - cellStateScale, - cellStateOffset); - - armnn::TensorInfo outputStateInfo({numBatches , outputSize}, - armnn::DataType::QAsymmU8, - inputOutputScale, - inputOutputOffset); - - // Connect input/output slots - Connect(input, layer, inputInfo, 0, 0); - Connect(cellStateIn, layer, cellStateInfo, 0, 1); - Connect(outputStateIn, layer, outputStateInfo, 0, 2); - - Connect(layer, cellStateOut, cellStateInfo, 0, 0); - Connect(layer, outputStateOut, outputStateInfo, 1, 0); - - CreateTensorHandles(graph, factory); - - // Create workload and check layer support - auto workload = MakeAndCheckWorkload(*layer, factory); - QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData(); - - // Validate input/output sizes - CHECK(queueDescriptor.m_Inputs.size() == 3); - CHECK(queueDescriptor.m_Outputs.size() == 2); - - // Validate weight tensor info - CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo)); - CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo)); - CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo)); - CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo)); - - CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo)); - CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo)); - CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo)); - CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo)); - - CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo)); - CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo)); - CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo)); - CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo)); - - return workload; -} - -template -std::unique_ptr CreateQLstmWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - QLstmDescriptor layerDesc; - layerDesc.m_CifgEnabled = true; - 
layerDesc.m_PeepholeEnabled = false; - layerDesc.m_ProjectionEnabled = false; - layerDesc.m_LayerNormEnabled = true; - - layerDesc.m_CellClip = 0.0f; - layerDesc.m_ProjectionClip = 0.0f; - - layerDesc.m_HiddenStateZeroPoint = 0; - layerDesc.m_HiddenStateScale = 0.007f; - - layerDesc.m_InputIntermediateScale = 0.007059f; - layerDesc.m_ForgetIntermediateScale = 0.007812f; - layerDesc.m_CellIntermediateScale = 0.007059f; - layerDesc.m_OutputIntermediateScale = 0.007812f; - - QLstmLayer* const layer = graph.AddLayer(layerDesc, "qLstm"); - - unsigned int numBatches = 2; - unsigned int inputSize = 4; - unsigned int numUnits = 4; - unsigned int outputSize = 4; - - // Scale/Offset quantization info - float inputScale = 0.0078125f; - int32_t inputOffset = 0; - - // if (!projectionEnabled) outputScale == hiddenStateScale - float outputScale = layerDesc.m_HiddenStateScale; - int32_t outputOffset = layerDesc.m_HiddenStateZeroPoint; - - float cellStateScale = 3.05176e-05f; - int32_t cellStateOffset = 0; - - float weightsScale = 0.00784314f; - int32_t weightsOffset = 0; - - float layerNormScale = 3.05182e-05f; - int32_t layerNormOffset = 0; - - float biasScale = layerNormScale / 1024; - int32_t biasOffset = 0; - - // Weights and bias tensor and quantization info - armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, - armnn::DataType::QSymmS8, - weightsScale, - weightsOffset); - - armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, - armnn::DataType::QSymmS8, - weightsScale, - weightsOffset); - - armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset); - - armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset); - - // Create and allocate tensors - layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique(inputWeightsInfo); - layer->m_BasicParameters.m_InputToCellWeights = std::make_unique(inputWeightsInfo); - layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique(inputWeightsInfo); - - layer->m_BasicParameters.m_RecurrentToForgetWeights = - std::make_unique(recurrentWeightsInfo); - layer->m_BasicParameters.m_RecurrentToCellWeights = - std::make_unique(recurrentWeightsInfo); - layer->m_BasicParameters.m_RecurrentToOutputWeights = - std::make_unique(recurrentWeightsInfo); - - layer->m_BasicParameters.m_ForgetGateBias = std::make_unique(biasInfo); - layer->m_BasicParameters.m_CellBias = std::make_unique(biasInfo); - layer->m_BasicParameters.m_OutputGateBias = std::make_unique(biasInfo); - - layer->m_LayerNormParameters.m_ForgetLayerNormWeights = - std::make_unique(layerNormWeightsInfo); - layer->m_LayerNormParameters.m_CellLayerNormWeights = - std::make_unique(layerNormWeightsInfo); - layer->m_LayerNormParameters.m_OutputLayerNormWeights = - std::make_unique(layerNormWeightsInfo); - - layer->m_BasicParameters.m_InputToForgetWeights->Allocate(); - layer->m_BasicParameters.m_InputToCellWeights->Allocate(); - layer->m_BasicParameters.m_InputToOutputWeights->Allocate(); - - layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate(); - layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate(); - layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate(); - - layer->m_BasicParameters.m_ForgetGateBias->Allocate(); - layer->m_BasicParameters.m_CellBias->Allocate(); - layer->m_BasicParameters.m_OutputGateBias->Allocate(); - - layer->m_LayerNormParameters.m_ForgetLayerNormWeights->Allocate(); - layer->m_LayerNormParameters.m_CellLayerNormWeights->Allocate(); - 
layer->m_LayerNormParameters.m_OutputLayerNormWeights->Allocate(); - - // Input and output layers - Layer* const input = graph.AddLayer(0, "input"); - Layer* const outputStateIn = graph.AddLayer(1, "outputStateIn"); - Layer* const cellStateIn = graph.AddLayer(2, "cellStateIn"); - - Layer* const outputStateOut = graph.AddLayer(0, "outputStateOut"); - Layer* const cellStateOut = graph.AddLayer(1, "cellStateOut"); - Layer* const output = graph.AddLayer(2, "output"); - - // Input/Output tensor info - armnn::TensorInfo inputInfo({numBatches , inputSize}, - armnn::DataType::QAsymmS8, - inputScale, - inputOffset); - - armnn::TensorInfo cellStateInfo({numBatches , numUnits}, - armnn::DataType::QSymmS16, - cellStateScale, - cellStateOffset); - - armnn::TensorInfo outputStateInfo({numBatches , outputSize}, - armnn::DataType::QAsymmS8, - outputScale, - outputOffset); - - // Connect layers to slots - Connect(input, layer, inputInfo, 0, 0); - Connect(outputStateIn, layer, outputStateInfo, 0, 1); - Connect(cellStateIn, layer, cellStateInfo, 0, 2); - - Connect(layer, outputStateOut, outputStateInfo, 0, 0); - Connect(layer, cellStateOut, cellStateInfo, 1, 0); - Connect(layer, output, outputStateInfo, 2, 0); - - CreateTensorHandles(graph, factory); - - // Create and check workload - auto workload = MakeAndCheckWorkload(*layer, factory); - QLstmQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f); - CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f); - CHECK(queueDescriptor.m_Inputs.size() == 3); - CHECK(queueDescriptor.m_Outputs.size() == 3); - - CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo)); - CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo)); - CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo)); - - CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo)); - CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo)); - CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo)); - - CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo)); - CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo)); - CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo)); - - return workload; -} - -template -std::unique_ptr CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - Convolution2dDescriptor layerDesc; - layerDesc.m_PadLeft = 1; - layerDesc.m_PadRight = 1; - layerDesc.m_PadTop = 1; - layerDesc.m_PadBottom = 1; - layerDesc.m_StrideX = 1; - layerDesc.m_StrideY = 1; - layerDesc.m_BiasEnabled = true; - - Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - - layer->m_Weight = std::make_unique(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale)); - layer->m_Bias = std::make_unique - (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)); - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. 
- Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale)); - Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 1); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3}, - DataType, inputsQScale))); - CHECK((queueDescriptor.m_Bias->GetTensorInfo() - == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale))); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateDepthwiseConvolution2dWorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) -{ - // Creates the layer we're testing. - DepthwiseConvolution2dDescriptor layerDesc; - layerDesc.m_PadLeft = 1; - layerDesc.m_PadRight = 2; - layerDesc.m_PadTop = 1; - layerDesc.m_PadBottom = 2; - layerDesc.m_StrideX = 1; - layerDesc.m_StrideY = 1; - layerDesc.m_BiasEnabled = false; - layerDesc.m_DataLayout = dataLayout; - - DepthwiseConvolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - layer->m_Weight = std::make_unique(TensorInfo({1, 4, 4, 2}, DataType)); // [ 1, H, W, I*M ] - layer->m_Weight->Allocate(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 }; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 }; - - // Connects up. - Connect(input, layer, TensorInfo(inputShape, DataType)); - Connect(layer, output, TensorInfo(outputShape, DataType)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 2); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2); - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType))); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. 
- FullyConnectedDescriptor layerDesc; - layerDesc.m_BiasEnabled = false; - layerDesc.m_TransposeWeightMatrix = true; - - FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique(TensorInfo({7, 20}, DataType, inputsQScale, 0)); - layer->m_Weight->Allocate(); - - armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); - weightsTensorInfo.SetConstant(); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - auto const weights = graph.AddLayer("weights"); - Layer* const output = graph.AddLayer(0, "output"); - - weights->m_LayerOutput = std::make_unique(weightsTensorInfo); - weights->m_LayerOutput->Allocate(); - - // Connects up. - Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); - Connect(weights, layer, weightsTensorInfo, 0, 1); - Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); - - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateFullyConnectedWithBlobWorkloadTest - (armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - FullyConnectedDescriptor layerDesc; - layerDesc.m_BiasEnabled = true; - layerDesc.m_TransposeWeightMatrix = true; - - FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - - // As optimization isn't run member variables need to be updated. - layer->m_Weight = std::make_unique(TensorInfo({7, 20}, DataType, inputsQScale, 0)); - layer->m_Bias = std::make_unique(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)); - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); - - armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); - armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale); - weightsTensorInfo.SetConstant(); - biasesTensorInfo.SetConstant(); - - auto activationDesc = std::make_shared(); - activationDesc->m_A = 10.0f; - activationDesc->m_B = 5.0f; - activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; - - layer->SetAdditionalInfoForObject(activationDesc); - - // Check that the additional information can be queried from the layer - std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); - ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); - ARMNN_ASSERT(static_cast(activationDescPtr->m_Function) == - armnn::ActivationFunction::BoundedReLu); - - // Creates extra layers. 
- Layer* const input = graph.AddLayer(0, "input"); - auto const weights = graph.AddLayer("weights"); - auto const biases = graph.AddLayer("biases"); - Layer* const output = graph.AddLayer(0, "output"); - - weights->m_LayerOutput = std::make_unique(weightsTensorInfo); - weights->m_LayerOutput->Allocate(); - biases->m_LayerOutput = std::make_unique(biasesTensorInfo); - biases->m_LayerOutput->Allocate(); - - // Connects up. - Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); - Connect(weights, layer, weightsTensorInfo, 0, 1); - Connect(biases, layer, biasesTensorInfo, 0, 2); - Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - - const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); - IgnoreUnused(queueDescBlobPtr); - - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); - ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); - ARMNN_ASSERT( - static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu - ); - - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); - CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); - CHECK(queueDescriptor.m_Inputs.size() == 3); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest - (armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - FullyConnectedDescriptor layerDesc; - layerDesc.m_BiasEnabled = true; - layerDesc.m_TransposeWeightMatrix = true; - layerDesc.m_ConstantWeights = false; - - FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; - - // Creates extra layers with weights and biases as input layers. - Layer* const input = graph.AddLayer(1, "input"); - Layer* const weights = graph.AddLayer(2, "weights"); - Layer* const biases = graph.AddLayer(3, "biases"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); - Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1); - Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2); - Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - - CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); - CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); - CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false); - CHECK(queueDescriptor.m_Inputs.size() == 3); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - - -template -std::unique_ptr CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW) -{ - // Creates the layer we're testing. 
- NormalizationDescriptor layerDesc; - layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across; - layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness; - layerDesc.m_NormSize = 3; - layerDesc.m_Alpha = 0.5f; - layerDesc.m_Beta = -1.0f; - layerDesc.m_K = 0.2f; - layerDesc.m_DataLayout = dataLayout; - - NormalizationLayer* layer = graph.AddLayer(layerDesc, "layer"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 }; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 }; - - // Connects up. - armnn::TensorInfo inputTensorInfo(inputShape, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across)); - CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness)); - CHECK(queueDescriptor.m_Parameters.m_NormSize == 3); - CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f); - CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f); - CHECK(queueDescriptor.m_Parameters.m_K == 0.2f); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW) -{ - // Creates the layer we're testing. - Pooling2dDescriptor layerDesc; - layerDesc.m_PoolType = PoolingAlgorithm::Average; - layerDesc.m_PoolWidth = 3; - layerDesc.m_PoolHeight = 3; - layerDesc.m_PadLeft = 2; - layerDesc.m_PadRight = 2; - layerDesc.m_PadTop = 1; - layerDesc.m_PadBottom = 1; - layerDesc.m_StrideX = 2; - layerDesc.m_StrideY = 3; - layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor; - layerDesc.m_DataLayout = dataLayout; - - Pooling2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); - - // Create extra layers - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2}; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? 
TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2}; - - // Connect up - Connect(input, layer, TensorInfo(inputShape, DataType)); - Connect(layer, output, TensorInfo(outputShape, DataType)); - CreateTensorHandles(graph, factory); - - // Make the workload and checks it - auto workload = MakeAndCheckWorkload(*layer, factory); - - Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); - CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average)); - CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor)); - CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3); - CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3); - CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); - CHECK(queueDescriptor.m_Parameters.m_StrideY == 3); - CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2); - CHECK(queueDescriptor.m_Parameters.m_PadRight == 2); - CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); - CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Return so we can do extra, backend-specific tests - return workload; -} - -template -std::unique_ptr CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Create the layer we're testing. - SoftmaxDescriptor softmaxDescriptor; - // Set Axis to -1 if CL or Neon until further Axes are supported. - if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc) - { - softmaxDescriptor.m_Axis = -1; - } - - Layer* const layer = graph.AddLayer(softmaxDescriptor, "layer"); - // Create extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connect up - armnn::TensorInfo tensorInfo({4, 1}, DataType); - if (DataType == armnn::DataType::QAsymmU8) - { - tensorInfo.SetQuantizationOffset(0); - tensorInfo.SetQuantizationScale(1.f / 256); - } - else if (DataType == armnn::DataType::QAsymmS8) - { - tensorInfo.SetQuantizationOffset(-128); - tensorInfo.SetQuantizationScale(1.f / 256); - } - - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Make the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Return so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr - CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) -{ - // Create the layer we're testing. - // NOTE: need three dimensions channels, height/y, width/x because the Compute - // library restricts subtensors to have the same x and y dimensions as - // their parent tensors, and therefore the origin on the x and y dimension - // has to be zero for any view. So we need a third dimension to split... - // NOTE: arguments are: number of views, number of dimensions. - ViewsDescriptor layerDesc(3, 3); - // NOTE: arguments are: view, dimension, value. - layerDesc.SetViewOriginCoord(0, 0, 0); - layerDesc.SetViewOriginCoord(1, 0, 1); - layerDesc.SetViewOriginCoord(2, 0, 3); - - Layer* const layer = graph.AddLayer(layerDesc, "layer"); - - // Adds extra layers. 
- Layer* const input = graph.AddLayer(0, "input"); - Layer* const output0 = graph.AddLayer(0, "output0"); - Layer* const output1 = graph.AddLayer(1, "output1"); - Layer* const output2 = graph.AddLayer(2, "output2"); - - // Connects up. - armnn::TensorInfo tensorInfo({5, 7, 7}, DataType); - Connect(input, layer, tensorInfo); - - armnn::TensorInfo output0Info({1, 7, 7}, DataType); - armnn::TensorInfo output1Info({2, 7, 7}, DataType); - armnn::TensorInfo output2Info({2, 7, 7}, DataType); - - Connect(layer, output0, output0Info, 0, 0); - Connect(layer, output1, output1Info, 1, 0); - Connect(layer, output2, output2Info, 2, 0); - - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - SplitterQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 3); - CHECK(queueDescriptor.m_ViewOrigins.size() == 3); - - CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0); - CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1); - CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3); - CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0); - CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0); - CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0); - CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0); - CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0); - CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads. -template -std::pair, std::unique_ptr> - CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph) -{ - armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType); - - armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType); - armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType); - - //Constructs the graph. - Layer* const input = graph.AddLayer(0, "input"); - - armnn::ViewsDescriptor splitterViews(2); - splitterViews.SetViewOriginCoord(0, 0, 0); - splitterViews.SetViewOriginCoord(0, 1, 0); - splitterViews.SetViewOriginCoord(0, 2, 0); - splitterViews.SetViewOriginCoord(0, 3, 0); - - splitterViews.SetViewOriginCoord(1, 0, 0); - splitterViews.SetViewOriginCoord(1, 1, 1); - splitterViews.SetViewOriginCoord(1, 2, 0); - splitterViews.SetViewOriginCoord(1, 3, 0); - - // create splitter layer - Layer* const splitter = graph.AddLayer(splitterViews, "splitter"); - CHECK(splitter); - - armnn::OriginsDescriptor concatViews(2); - concatViews.SetViewOriginCoord(0, 0, 0); - concatViews.SetViewOriginCoord(0, 1, 1); - concatViews.SetViewOriginCoord(0, 2, 0); - concatViews.SetViewOriginCoord(0, 3, 0); - - concatViews.SetViewOriginCoord(1, 0, 0); - concatViews.SetViewOriginCoord(1, 1, 0); - concatViews.SetViewOriginCoord(1, 2, 0); - concatViews.SetViewOriginCoord(1, 3, 0); - - // create concat layer - Layer* const concat = graph.AddLayer(concatViews, "concat"); - CHECK(concat); - - Layer* const output = graph.AddLayer(0, "output"); - - // Adds connections. - // connect input to splitter - Connect(input, splitter, inputTensorInfo, 0, 0); - // connect splitter[0] to concat[1] - Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up. 
- // connect splitter[1] to concat[0] - Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round. - // connect concat to output - Connect(concat, output, inputTensorInfo, 0, 0); - - // created tensor handles - CreateTensorHandles(graph, factory); - - // created splitter workload - auto workloadSplitter = MakeAndCheckWorkload(*splitter, factory); - CHECK(workloadSplitter); - // created concat workload - auto workloadConcat = MakeAndCheckWorkload(*concat, factory); - CHECK(workloadConcat); - - return {std::move(workloadSplitter), std::move(workloadConcat)}; -} - - -/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then -/// connected to two different activation layers -template -void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph, - std::unique_ptr& wlSplitter, - std::unique_ptr& wlActiv0_0, - std::unique_ptr& wlActiv0_1, - std::unique_ptr& wlActiv1_0, - std::unique_ptr& wlActiv1_1) -{ - armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType); - armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType); - armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType); - - //Constructs the graph. - Layer* const input = graph.AddLayer(0, "input"); - - armnn::ViewsDescriptor splitterViews(2); - - splitterViews.SetViewOriginCoord(0, 0, 0); - splitterViews.SetViewOriginCoord(0, 1, 0); - splitterViews.SetViewOriginCoord(0, 2, 0); - splitterViews.SetViewOriginCoord(0, 3, 0); - - splitterViews.SetViewOriginCoord(1, 0, 0); - splitterViews.SetViewOriginCoord(1, 1, 1); - splitterViews.SetViewOriginCoord(1, 2, 0); - splitterViews.SetViewOriginCoord(1, 3, 0); - - Layer* const splitter = graph.AddLayer(splitterViews, "splitter"); - - armnn::ActivationDescriptor activationDesc; - - Layer* const activ0_0 = graph.AddLayer(activationDesc, "activ0_0"); - Layer* const activ0_1 = graph.AddLayer(activationDesc, "activ0_1"); - Layer* const activ1_0 = graph.AddLayer(activationDesc, "activ1_0"); - Layer* const activ1_1 = graph.AddLayer(activationDesc, "activ1_1"); - - Layer* const output1 = graph.AddLayer(1, "output1"); - Layer* const output2 = graph.AddLayer(2, "output2"); - Layer* const output3 = graph.AddLayer(3, "output3"); - Layer* const output4 = graph.AddLayer(4, "output4"); - - // Adds connections. 
- Connect(input, splitter, inputTensorInfo, 0, 0); - Connect(splitter, activ0_0, splitTensorInfo1, 0, 0); - Connect(splitter, activ0_1, splitTensorInfo1, 0, 0); - - Connect(splitter, activ1_0, splitTensorInfo2, 1, 0); - Connect(splitter, activ1_1, splitTensorInfo2, 1, 0); - - Connect(activ0_0, output1, splitTensorInfo1, 0, 0); - Connect(activ0_1, output2, splitTensorInfo1, 0, 0); - Connect(activ1_0, output3, splitTensorInfo2, 0, 0); - Connect(activ1_1, output4, splitTensorInfo2, 0, 0); - - CreateTensorHandles(graph, factory); - - auto workloadSplitter = MakeAndCheckWorkload(*splitter, factory); - auto workloadActiv0_0 = MakeAndCheckWorkload(*activ0_0, factory); - auto workloadActiv0_1 = MakeAndCheckWorkload(*activ0_1, factory); - auto workloadActiv1_0 = MakeAndCheckWorkload(*activ1_0, factory); - auto workloadActiv1_1 = MakeAndCheckWorkload(*activ1_1, factory); - - wlSplitter = std::move(workloadSplitter); - wlActiv0_0 = std::move(workloadActiv0_0); - wlActiv0_1 = std::move(workloadActiv0_1); - wlActiv1_0 = std::move(workloadActiv1_0); - wlActiv1_1 = std::move(workloadActiv1_1); -} - -template -std::unique_ptr CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - DataLayout dataLayout = DataLayout::NCHW) -{ - TensorShape inputShape; - TensorShape outputShape; - - switch (dataLayout) { - case DataLayout::NHWC: - inputShape = { 2, 4, 4, 3 }; - outputShape = { 2, 2, 2, 3 }; - break; - case DataLayout::NCHW: - default: - inputShape = { 2, 3, 4, 4 }; - outputShape = { 2, 3, 2, 2 }; - } - - // Creates the layer we're testing. - ResizeDescriptor resizeDesc; - armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout; - resizeDesc.m_Method = ResizeMethod::Bilinear; - resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()]; - resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()]; - resizeDesc.m_DataLayout = dataLayout; - Layer* const layer = graph.AddLayer(resizeDesc, "resize"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo inputTensorInfo(inputShape, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - auto queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - BatchToSpaceNdDescriptor desc; - Layer* const layer = graph.AddLayer(desc, "batchToSpace"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo tensorInfo({1, 1, 1, 1}, DataType); - - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. 
- auto workload = MakeAndCheckWorkload(*layer, factory); - - BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - return workload; -} - -template -std::unique_ptr CreateLogSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Create the layer we're testing. - LogSoftmaxDescriptor logSoftmaxDescriptor; - // Set Axis to -1 if CL or Neon until further Axes are supported. - if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc) - { - logSoftmaxDescriptor.m_Axis = -1; - } - - Layer* const layer = graph.AddLayer(logSoftmaxDescriptor, "layer"); - // Create extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connect up - armnn::TensorInfo tensorInfo({4, 1}, DataType); - - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - CreateTensorHandles(graph, factory); - - // Make the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Return so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) -{ - // Creates the layer we're testing. - L2NormalizationDescriptor layerDesc; - layerDesc.m_DataLayout = dataLayout; - - Layer* const layer = graph.AddLayer(layerDesc, "l2norm"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 }; - TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? - TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 }; - - // Connects up. - armnn::TensorInfo inputTensorInfo(inputShape, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - // Creates the layer we're testing. - TensorShape outputShape({ 1, 4 }); - ReshapeDescriptor reshapeDesc; - reshapeDesc.m_TargetShape = outputShape; - Layer* const layer = graph.AddLayer(reshapeDesc, "layer"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. 
- armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - ReshapeQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConvertFp16ToFp32WorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph) -{ - // Creates the layer we're testing. - ConvertFp16ToFp32Layer* const layer = graph.AddLayer("Fp16ToFp32Converter"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); - armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConvertFp32ToFp16WorkloadTest( - armnn::IWorkloadFactory& factory, armnn::Graph& graph) -{ - // Creates the layer we're testing. - ConvertFp32ToFp16Layer* const layer = graph.AddLayer("Fp32ToFp16Converter"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); - armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) -{ - // Reduce along the first and second dimensions, and do not keep the reduced dimensions. - MeanDescriptor descriptor({ 1, 2 }, false); - - // Creates the layer we're testing. - Layer* const layer = graph.AddLayer(descriptor, "mean"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType); - armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType); - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. 
- auto workload = MakeAndCheckWorkload(*layer, factory); - - MeanQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis); - CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory, - armnn::Graph &graph, - const armnn::TensorShape &outputShape, - unsigned int concatAxis) -{ - armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - - // Constructs the graph. - Layer* const input0 = graph.AddLayer(0, "input0"); - Layer* const input1 = graph.AddLayer(1, "input1"); - armnn::OriginsDescriptor descriptor; - - std::vector inputShapes{{ 2, 3, 2, 5 }, { 2, 3, 2, 5 }}; - - descriptor = CreateDescriptorForConcatenation(inputShapes.begin(), - inputShapes.end(), - concatAxis); - - // create concat layer - Layer* const concat = graph.AddLayer(descriptor, "concat"); - CHECK(concat); - - Layer* const output = graph.AddLayer(0, "output"); - - // Adds connections. - // connect input0 to concat - Connect(input0, concat, inputTensorInfo, 0, 0); - // connect input1 to concat - Connect(input1, concat, inputTensorInfo, 0, 1); - // connect concat to output - Connect(concat, output, outputTensorInfo, 0, 0); - - // create tensor handles - CreateTensorHandles(graph, factory); - - // create concat workload - auto workloadConcat = MakeAndCheckWorkload(*concat, factory); - CHECK(workloadConcat); - - return workloadConcat; -} - -template -std::pair> CreatePreCompiledWorkloadTest( - armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - bool biasEnabled = false) -{ - IgnoreUnused(graph); - - // build up the structure of the network - armnn::INetworkPtr net(armnn::INetwork::Create()); - - // Add an input layer - armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer"); - CHECK(inputLayer); - - // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW - // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC - // this test is using NHWC, so the weights shape is OHWI - TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true); - unsigned int weightsLength = weightsTensorInfo.GetNumElements(); - - using WeightType = armnn::ResolveType; - std::vector convWeightsData(weightsLength); - for (unsigned int i = 0; i < weightsLength; ++i) - { - convWeightsData[i] = static_cast(i); - } - - armnn::ConstTensor weights(weightsTensorInfo, convWeightsData); - - // Add a layer that can be used in the PreCompiled layer - armnn::Convolution2dDescriptor convDesc2d; - convDesc2d.m_StrideX = 1; - convDesc2d.m_StrideY = 1; - convDesc2d.m_BiasEnabled = biasEnabled; - convDesc2d.m_DataLayout = armnn::DataLayout::NHWC; - - armnn::IConnectableLayer* convLayer = nullptr; - const std::string convLayerName("conv layer"); - - if (biasEnabled) - { - constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ? 
- armnn::DataType::Signed32 : armnn::DataType::Float32; - - TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true); - unsigned int biasLength = biasTensorInfo.GetNumElements(); - - using BiasType = armnn::ResolveType; - std::vector biasData(biasLength); - std::fill(biasData.begin(), biasData.end(), static_cast(0)); - - armnn::ConstTensor biases(biasTensorInfo, biasData); - - // Create convolution layer with biases - convLayer = net->AddConvolution2dLayer(convDesc2d, - weights, - Optional(biases), - convLayerName.c_str()); - } - else - { - // Create convolution layer without biases - convLayer = net->AddConvolution2dLayer(convDesc2d, - weights, - EmptyOptional(), - convLayerName.c_str()); - } - - CHECK(convLayer); - - // Add an output layer - armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer"); - CHECK(outputLayer); - - // set the tensors in the network (NHWC format) - TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType); - if (dataType == armnn::DataType::QAsymmU8) - { - inputTensorInfo.SetQuantizationOffset(0); - inputTensorInfo.SetQuantizationScale(0.9f); - } - - TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType); - if (dataType == armnn::DataType::QAsymmU8) - { - outputTensorInfo.SetQuantizationOffset(0); - outputTensorInfo.SetQuantizationScale(0.9f); - } - - // Connect the layers - inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); - inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); - - convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); - convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - - // Optimize the network for the backend supported by the factory - std::vector backends = {factory.GetBackendId()}; - armnn::IRuntime::CreationOptions options; - armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); - armnn::OptimizerOptions optimizerOptions; - armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(), - optimizerOptions); - CHECK(optimizedNet != nullptr); - - // Find the PreCompiled layer in the optimised graph - armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get()); - Layer* preCompiledLayer = nullptr; - for (auto& layer : optimisedGraph) - { - if (layer->GetType() == LayerType::PreCompiled) - { - preCompiledLayer = layer; - } - } - CHECK(preCompiledLayer != nullptr); - - // Create the TensorHandles. - CreateTensorHandles(optimisedGraph, factory); - - // Make the workload and check it. - auto workload = MakeAndCheckWorkload(*preCompiledLayer, factory); - - PreCompiledQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns the workload so we can do extra, backend-specific tests. - // NOTE: We need to return the optimised network as well, otherwise it gets - // out of scope and the tensor handles get destructed - return std::make_pair(std::move(optimizedNet), std::move(workload)); -} - -template -std::unique_ptr CreateConstantWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - const armnn::TensorShape& outputShape) -{ - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - - // create constant layer - auto constant = graph.AddLayer("constant"); - CHECK(constant); - constant->m_LayerOutput = std::make_unique(outputTensorInfo); - - Layer* const output = graph.AddLayer(0, "output"); - - // Adds connections. 
- // connect constant to output - Connect(constant, output, outputTensorInfo, 0, 0); - - // create tensor handles - CreateTensorHandles(graph, factory); - - // create Constant workload" - auto workloadConstant = MakeAndCheckWorkload(*constant, factory); - CHECK(workloadConstant); - - return workloadConstant; -} - -template -std::unique_ptr CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - const armnn::TensorShape& inputShape, - const armnn::TensorShape& alphaShape, - const armnn::TensorShape& outputShape, - armnn::DataType dataType) -{ - // Creates the PReLU layer - Layer* const layer = graph.AddLayer("prelu"); - CHECK(layer != nullptr); - - // Creates extra layers - Layer* const input = graph.AddLayer (0, "input"); - Layer* const alpha = graph.AddLayer (1, "alpha"); - Layer* const output = graph.AddLayer(0, "output"); - CHECK(input != nullptr); - CHECK(alpha != nullptr); - CHECK(output != nullptr); - - // Connects up - armnn::TensorInfo inputTensorInfo (inputShape, dataType); - armnn::TensorInfo alphaTensorInfo (alphaShape, dataType); - armnn::TensorInfo outputTensorInfo(outputShape, dataType); - Connect(input, layer, inputTensorInfo, 0, 0); - Connect(alpha, layer, alphaTensorInfo, 0, 1); - Connect(layer, output, outputTensorInfo, 0, 0); - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it - auto workload = MakeAndCheckWorkload(*layer, factory); - - PreluQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 2); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - // Returns so we can do extra, backend-specific tests. - return workload; -} - -template -std::unique_ptr CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - SpaceToDepthDescriptor desc; - desc.m_BlockSize = 2; - Layer* const layer = graph.AddLayer(desc, "spaceToDepth"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. - armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType); - armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType); - - Connect(input, layer, inputTensorInfo); - Connect(layer, output, outputTensorInfo); - - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == 1); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - return workload; -} - -template -std::unique_ptr CreateStackWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - const armnn::TensorShape& inputShape, - const armnn::TensorShape& outputShape, - unsigned int axis, - unsigned int numInputs) -{ - armnn::TensorInfo inputTensorInfo(inputShape, DataType); - armnn::TensorInfo outputTensorInfo(outputShape, DataType); - - // Constructs the Stack layer. - armnn::StackDescriptor descriptor(axis, numInputs, inputShape); - Layer* const stackLayer = graph.AddLayer(descriptor, "stack"); - CHECK(stackLayer != nullptr); - - // Constructs layer inputs and output. - std::vector inputs; - for (unsigned int i=0; i( - static_cast(i), - ("input" + std::to_string(i)).c_str() - )); - CHECK(inputs[i] != nullptr); - } - Layer* const output = graph.AddLayer(0, "output"); - CHECK(output != nullptr); - - // Adds connections. 
- for (unsigned int i=0; i(*stackLayer, factory); - StackQueueDescriptor queueDescriptor = stackWorkload->GetData(); - CHECK(queueDescriptor.m_Inputs.size() == numInputs); - CHECK(queueDescriptor.m_Outputs.size() == 1); - - return stackWorkload; -} - -} // Anonymous namespace +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. +#include "../../armnnTestUtils/CreateWorkload.hpp" \ No newline at end of file diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp index f3753398b4..d246a082ec 100644 --- a/src/armnn/test/GraphTests.cpp +++ b/src/armnn/test/GraphTests.cpp @@ -2,7 +2,7 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "GraphUtils.hpp" +#include #include #include diff --git a/src/armnn/test/GraphUtils.cpp b/src/armnn/test/GraphUtils.cpp deleted file mode 100644 index bc6b562c9d..0000000000 --- a/src/armnn/test/GraphUtils.cpp +++ /dev/null @@ -1,78 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "GraphUtils.hpp" - -#include - -bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name) -{ - for (auto&& layer : graph) - { - if (layer->GetName() == name) - { - return true; - } - } - return false; -} - -armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name) -{ - for (auto&& layer : graph) - { - if (layer->GetNameStr() == name) - { - return layer; - } - } - return nullptr; -} - -bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num) -{ - return layer->GetNumInputSlots() == num; -} - -bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num) -{ - return layer->GetNumOutputSlots() == num; -} - -bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, - unsigned int srcSlot, unsigned int destSlot, - const armnn::TensorInfo& expectedTensorInfo) -{ - const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(srcSlot); - const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo(); - if (expectedTensorInfo != tensorInfo) - { - return false; - } - const unsigned int numConnections = outputSlot.GetNumConnections(); - for (unsigned int c = 0; c < numConnections; ++c) - { - auto inputSlot = armnn::PolymorphicDowncast(outputSlot.GetConnection(c)); - if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() && - inputSlot->GetSlotIndex() == destSlot) - { - return true; - } - } - return false; -} - -/// Checks that first comes before second in the order. -bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second) -{ - graph.Print(); - - const auto& order = graph.TopologicalSort(); - - auto firstPos = std::find(order.begin(), order.end(), first); - auto secondPos = std::find(firstPos, order.end(), second); - - return (secondPos != order.end()); -} diff --git a/src/armnn/test/GraphUtils.hpp b/src/armnn/test/GraphUtils.hpp index 60d03dca23..02954e3d1f 100644 --- a/src/armnn/test/GraphUtils.hpp +++ b/src/armnn/test/GraphUtils.hpp @@ -1,25 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // -#pragma once -#include - -#include - - -bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name); - -armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name); - -bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num); - -bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num); - -bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, - unsigned int srcSlot, unsigned int destSlot, - const armnn::TensorInfo& expectedTensorInfo); - -bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second); +#include "../../armnnTestUtils/GraphUtils.hpp" +#pragma message("src/armnn/test/GraphUtils.hpp has been deprecated, it is due for removal in the 22.08 release." \ + " Please use the header from the armnnTestUtils library, /src/armnnTestUtils/GraphUtils.hpp") diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp index f8d8e89555..c7c0c6d2a7 100644 --- a/src/armnn/test/InferOutputTests.cpp +++ b/src/armnn/test/InferOutputTests.cpp @@ -5,7 +5,7 @@ #include "InferOutputTests.hpp" -#include +#include TEST_SUITE("LayerValidateOutput") { diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp index 6435d87be3..799739b9ef 100644 --- a/src/armnn/test/InferOutputTests.hpp +++ b/src/armnn/test/InferOutputTests.hpp @@ -5,7 +5,7 @@ #pragma once -#include "TestUtils.hpp" +#include #include #include diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index c1927e3601..d4edf5da97 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "GraphUtils.hpp" +#include #include diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index 750e6967ad..a5db0ac0b0 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "TestUtils.hpp" +#include #include #include diff --git a/src/armnn/test/PredicateResult.hpp b/src/armnn/test/PredicateResult.hpp index a344c8e3ad..8edf8b1180 100644 --- a/src/armnn/test/PredicateResult.hpp +++ b/src/armnn/test/PredicateResult.hpp @@ -2,47 +2,8 @@ // Copyright © 2021 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once -#include +#include -namespace armnn -{ - -class PredicateResult -{ -public: - explicit PredicateResult(bool result) - : m_Result(result) - {} - - PredicateResult(const PredicateResult& predicateResult) - : m_Result(predicateResult.m_Result) - , m_Message(predicateResult.m_Message.str()) - {} - - void SetResult(bool newResult) - { - m_Result = newResult; - } - - std::stringstream& Message() - { - return m_Message; - } - - bool operator!() const - { - return !m_Result; - } - - void operator=(PredicateResult otherPredicateResult) - { - otherPredicateResult.m_Result = m_Result; - } - - bool m_Result; - std::stringstream m_Message; -}; - -} // namespace armnn \ No newline at end of file +#pragma message("src/armnn/test/PredicateResult.hpp has been deprecated, it is due for removal in 22.08 release."
\ + " Please use public interface include/armnnTestUtils/PredicateResult.hpp") \ No newline at end of file diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp index f055f2368b..045007b5c9 100644 --- a/src/armnn/test/RuntimeTests.cpp +++ b/src/armnn/test/RuntimeTests.cpp @@ -22,7 +22,7 @@ #include #include "RuntimeTests.hpp" -#include "TestUtils.hpp" +#include namespace armnn { diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp index 95cea58b30..626cda3d1c 100644 --- a/src/armnn/test/TensorHelpers.hpp +++ b/src/armnn/test/TensorHelpers.hpp @@ -1,235 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once -#include "PredicateResult.hpp" - -#include -#include -#include - -#include - -#include - -#include -#include -#include -#include - -constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f; - -template -struct SelectiveComparer -{ - static bool Compare(T a, T b) - { - return (std::max(a, b) - std::min(a, b)) <= 1; - } - -}; - -template -struct SelectiveComparer -{ - static bool Compare(T a, T b) - { - // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead. - if (a == 0.0f || b == 0.0f) - { - return std::abs(a - b) <= g_FloatCloseToZeroTolerance; - } - - if (std::isinf(a) && a == b) - { - return true; - } - - if (std::isnan(a) && std::isnan(b)) - { - return true; - } - - // For unquantized floats we use a tolerance of 1%. - return armnnUtils::within_percentage_tolerance(a, b); - } -}; - -template -bool SelectiveCompare(T a, T b) -{ - return SelectiveComparer()>::Compare(a, b); -}; - -template -bool SelectiveCompareBoolean(T a, T b) -{ - return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0))); -}; - -template -armnn::PredicateResult CompareTensors(const std::vector& actualData, - const std::vector& expectedData, - const armnn::TensorShape& actualShape, - const armnn::TensorShape& expectedShape, - bool compareBoolean = false, - bool isDynamic = false) -{ - if (actualData.size() != expectedData.size()) - { - armnn::PredicateResult res(false); - res.Message() << "Different data size [" - << actualData.size() - << "!=" - << expectedData.size() - << "]"; - return res; - } - - if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions()) - { - armnn::PredicateResult res(false); - res.Message() << "Different number of dimensions [" - << actualShape.GetNumDimensions() - << "!=" - << expectedShape.GetNumDimensions() - << "]"; - return res; - } - - if (actualShape.GetNumElements() != expectedShape.GetNumElements()) - { - armnn::PredicateResult res(false); - res.Message() << "Different number of elements [" - << actualShape.GetNumElements() - << "!=" - << expectedShape.GetNumElements() - << "]"; - return res; - } - - unsigned int numberOfDimensions = actualShape.GetNumDimensions(); - - if (!isDynamic) - { - // Checks they are same shape. - for (unsigned int i = 0; i < numberOfDimensions; ++i) - { - if (actualShape[i] != expectedShape[i]) - { - armnn::PredicateResult res(false); - res.Message() << "Different shapes [" - << actualShape[i] - << "!=" - << expectedShape[i] - << "]"; - return res; - } - } - } - - // Fun iteration over n dimensions. 
- std::vector indices; - for (unsigned int i = 0; i < numberOfDimensions; i++) - { - indices.emplace_back(0); - } - - std::stringstream errorString; - int numFailedElements = 0; - constexpr int maxReportedDifferences = 3; - unsigned int index = 0; - - // Compare data element by element. - while (true) - { - bool comparison; - // As true for uint8_t is non-zero (1-255) we must have a dedicated compare for Booleans. - if(compareBoolean) - { - comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]); - } - else - { - comparison = SelectiveCompare(actualData[index], expectedData[index]); - } - - if (!comparison) - { - ++numFailedElements; - - if (numFailedElements <= maxReportedDifferences) - { - if (numFailedElements >= 2) - { - errorString << ", "; - } - errorString << "["; - for (unsigned int i = 0; i < numberOfDimensions; ++i) - { - errorString << indices[i]; - if (i != numberOfDimensions - 1) - { - errorString << ","; - } - } - errorString << "]"; - - errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")"; - } - } - - ++indices[numberOfDimensions - 1]; - for (unsigned int i=numberOfDimensions-1; i>0; i--) - { - if (indices[i] == actualShape[i]) - { - indices[i] = 0; - ++indices[i - 1]; - } - } - if (indices[0] == actualShape[0]) - { - break; - } - - index++; - } - - armnn::PredicateResult comparisonResult(true); - if (numFailedElements > 0) - { - comparisonResult.SetResult(false); - comparisonResult.Message() << numFailedElements << " different values at: "; - if (numFailedElements > maxReportedDifferences) - { - errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)"; - } - comparisonResult.Message() << errorString.str(); - } - - return comparisonResult; -} - -template -std::vector MakeRandomTensor(const armnn::TensorInfo& tensorInfo, - unsigned int seed, - float min = -10.0f, - float max = 10.0f) -{ - std::mt19937 gen(seed); - std::uniform_real_distribution dist(min, max); - - std::vector init(tensorInfo.GetNumElements()); - for (unsigned int i = 0; i < init.size(); i++) - { - init[i] = dist(gen); - } - - const float qScale = tensorInfo.GetQuantizationScale(); - const int32_t qOffset = tensorInfo.GetQuantizationOffset(); - - return armnnUtils::QuantizedVector(init, qScale, qOffset); -} +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. +#include "../../armnnTestUtils/TensorHelpers.hpp" \ No newline at end of file diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp deleted file mode 100644 index 97cc80c8a2..0000000000 --- a/src/armnn/test/TestUtils.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "TestUtils.hpp" - -#include - -using namespace armnn; - -void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, - unsigned int fromIndex, unsigned int toIndex) -{ - ARMNN_ASSERT(from); - ARMNN_ASSERT(to); - - try - { - from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); - } - catch (const std::out_of_range& exc) - { - std::ostringstream message; - - if (to->GetType() == armnn::LayerType::FullyConnected && toIndex == 2) - { - message << "Tried to connect bias to FullyConnected layer when bias is not enabled: "; - } - - message << "Failed to connect to input slot " - << toIndex - << " on " - << GetLayerTypeAsCString(to->GetType()) - << " layer " - << std::quoted(to->GetName()) - << " as the slot does not exist or is unavailable"; - throw LayerValidationException(message.str()); - } - - from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo); -} - -namespace armnn -{ - -Graph& GetGraphForTesting(IOptimizedNetwork* optNet) -{ - return optNet->pOptimizedNetworkImpl->GetGraph(); -} - -ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet) -{ - return optNet->pOptimizedNetworkImpl->GetModelOptions(); -} - -profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime) -{ - return runtime->m_ProfilingService; -} - -} \ No newline at end of file diff --git a/src/armnn/test/TestUtils.hpp b/src/armnn/test/TestUtils.hpp index fa9156bc09..fe5331ec3d 100644 --- a/src/armnn/test/TestUtils.hpp +++ b/src/armnn/test/TestUtils.hpp @@ -1,58 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once +#include "../../armnnTestUtils/TestUtils.hpp" -#include -#include -#include - -void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, - unsigned int fromIndex = 0, unsigned int toIndex = 0); - -template -bool IsLayerOfType(const armnn::Layer* const layer) -{ - return (layer->GetType() == armnn::LayerEnumOf()); -} - -inline bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last) -{ - return (first == last); -} - -/// Checks each unary function in Us evaluates true for each correspondent layer in the sequence [first, last). -template -bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last, U&& u, Us&&... us) -{ - return u(*first) && CheckSequence(std::next(first), last, us...); -} - -template -bool CheckRelatedLayers(armnn::Graph& graph, const std::list& testRelatedLayers) -{ - for (auto& layer : graph) - { - if (layer->GetType() == armnn::LayerEnumOf()) - { - auto& relatedLayers = layer->GetRelatedLayerNames(); - if (!std::equal(relatedLayers.begin(), relatedLayers.end(), testRelatedLayers.begin(), - testRelatedLayers.end())) - { - return false; - } - } - } - - return true; -} - -namespace armnn -{ -Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr); -ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr); -profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime); - -} // namespace armnn \ No newline at end of file +#pragma message("src/armnn/test/TestUtils.hpp has been deprecated, it is due for removal in 22.08 release." 
\ + " Please use from armnnTestUtils library, /src/armnnTestUtils/TestUtils.hpp) \ No newline at end of file diff --git a/src/armnn/test/UnitTests.cpp b/src/armnn/test/UnitTests.cpp deleted file mode 100644 index cf532a76fd..0000000000 --- a/src/armnn/test/UnitTests.cpp +++ /dev/null @@ -1,67 +0,0 @@ -// -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN -#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN -#endif -#include - -#include "UnitTests.hpp" - -struct ConfigureLoggingFixture -{ - ConfigureLoggingFixture() - { - ConfigureLoggingTest(); - } -}; - - - -TEST_SUITE("LoggerSuite") -{ -TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest") -{ - std::stringstream ss; - { - struct StreamRedirector - { - public: - StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer) - : m_Stream(stream) - , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) - {} - ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); } - - private: - std::ostream& m_Stream; - std::streambuf* m_BackupBuffer; - }; - - StreamRedirector redirect(std::cout, ss.rdbuf()); - - using namespace armnn; - SetLogFilter(LogSeverity::Trace); - SetAllLoggingSinks(true, false, false); - - ARMNN_LOG(trace) << "My trace message; " << -2; - ARMNN_LOG(debug) << "My debug message; " << -1; - ARMNN_LOG(info) << "My info message; " << 0; - ARMNN_LOG(warning) << "My warning message; " << 1; - ARMNN_LOG(error) << "My error message; " << 2; - ARMNN_LOG(fatal) << "My fatal message; " << 3; - - SetLogFilter(LogSeverity::Fatal); - } - - CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos); - CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos); - CHECK(ss.str().find("Info: My info message; 0") != std::string::npos); - CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos); - CHECK(ss.str().find("Error: My error message; 2") != std::string::npos); - CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos); -} - -} \ No newline at end of file diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp index e4a8b96b52..129a766729 100644 --- a/src/armnn/test/UnitTests.hpp +++ b/src/armnn/test/UnitTests.hpp @@ -2,187 +2,8 @@ // Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once -#include -#include -#include -#include +#include "../../armnnTestUtils/UnitTests.hpp" -#include -#include - -#include "TensorHelpers.hpp" - -#include - -inline void ConfigureLoggingTest() -{ - // Configures logging for both the ARMNN library and this test program. - armnn::ConfigureLogging(true, true, armnn::LogSeverity::Fatal); -} - -// The following macros require the caller to have defined FactoryType, with one of the following using statements: -// -// using FactoryType = armnn::RefWorkloadFactory; -// using FactoryType = armnn::ClWorkloadFactory; -// using FactoryType = armnn::NeonWorkloadFactory; - -/// Executes CHECK_MESSAGE on CompareTensors() return value so that the predicate_result message is reported. -/// If the test reports itself as not supported then the tensors are not compared. -/// Additionally this checks that the supportedness reported by the test matches the name of the test. -/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name. 
-/// This is useful because it clarifies that the feature being tested is not actually supported -/// (a passed test with the name of a feature would imply that feature was supported). -/// If support is added for a feature, the test case will fail because the name incorrectly contains UNSUPPORTED. -/// If support is removed for a feature, the test case will fail because the name doesn't contain UNSUPPORTED. -template -void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult& testResult) -{ - bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos; - CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported, - "The test name does not match the supportedness it is reporting"); - if (testResult.m_Supported) - { - auto result = CompareTensors(testResult.m_ActualData, - testResult.m_ExpectedData, - testResult.m_ActualShape, - testResult.m_ExpectedShape, - testResult.m_CompareBoolean); - CHECK_MESSAGE(result.m_Result, result.m_Message.str()); - } -} - -template -void CompareTestResultIfSupported(const std::string& testName, const std::vector>& testResult) -{ - bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos; - for (unsigned int i = 0; i < testResult.size(); ++i) - { - CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported, - "The test name does not match the supportedness it is reporting"); - if (testResult[i].m_Supported) - { - auto result = CompareTensors(testResult[i].m_ActualData, - testResult[i].m_ExpectedData, - testResult[i].m_ActualShape, - testResult[i].m_ExpectedShape); - CHECK_MESSAGE(result.m_Result, result.m_Message.str()); - } - } -} - -template -void RunTestFunction(const char* testName, TFuncPtr testFunction, Args... args) -{ - std::unique_ptr profiler = std::make_unique(); - armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get()); - - auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); - FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); - - auto testResult = (*testFunction)(workloadFactory, memoryManager, args...); - CompareTestResultIfSupported(testName, testResult); - - armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr); -} - - -template -void RunTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args) -{ - std::unique_ptr profiler = std::make_unique(); - armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get()); - - auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); - FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); - - auto tensorHandleFactory = WorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); - - auto testResult = (*testFunction)(workloadFactory, memoryManager, tensorHandleFactory, args...); - CompareTestResultIfSupported(testName, testResult); - - armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr); -} - -#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \ - TEST_CASE(#TestName) \ - { \ - TestFunction(); \ - } - -#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \ - TEST_CASE(#TestName) \ - { \ - RunTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \ - TEST_CASE_FIXTURE(Fixture, #TestName) \ - { \ - RunTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) 
\ - TEST_CASE(#TestName) \ - { \ - RunTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \ - TEST_CASE_FIXTURE(Fixture, #TestName) \ - { \ - RunTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -template -void CompareRefTestFunction(const char* testName, TFuncPtr testFunction, Args... args) -{ - auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); - FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); - - armnn::RefWorkloadFactory refWorkloadFactory; - - auto testResult = (*testFunction)(workloadFactory, memoryManager, refWorkloadFactory, args...); - CompareTestResultIfSupported(testName, testResult); -} - -template -void CompareRefTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args) -{ - auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); - FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); - - armnn::RefWorkloadFactory refWorkloadFactory; - auto tensorHandleFactory = WorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); - auto refTensorHandleFactory = - RefWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); - - auto testResult = (*testFunction)( - workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, args...); - CompareTestResultIfSupported(testName, testResult); -} - -#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \ - TEST_CASE(#TestName) \ - { \ - CompareRefTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \ - TEST_CASE(#TestName) \ - { \ - CompareRefTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \ - TEST_CASE_FIXTURE(Fixture, #TestName) \ - { \ - CompareRefTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ - } - -#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) \ - TEST_CASE_FIXTURE(Fixture, #TestName) \ - { \ - CompareRefTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ - } +#pragma message("src/armnn/test/UnitTests.hpp has been deprecated, it is due for removal in 22.08 release." 
\ + " Please use from armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp) \ No newline at end of file diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp index 7573005518..0636a00234 100644 --- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp +++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MIT // -#include "../GraphUtils.hpp" -#include "../TestUtils.hpp" +#include +#include #include diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp index 7b326fa8bc..4aacf7f4fe 100644 --- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp +++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include #include diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp index f74ab0f308..531a0dd92a 100644 --- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp +++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include #include diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp index c4551525c1..4c453cc799 100644 --- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp +++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index a598983706..a64660f987 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -5,7 +5,7 @@ #include "LayersFwd.hpp" #include -#include +#include #include #include #include diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp index 63cd170f02..37d770190a 100644 --- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp +++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp index e2ac1bd69e..bc8839948b 100644 --- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp +++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp index 54a9d9a189..99b2b80556 100644 --- a/src/armnn/test/optimizations/FuseActivationTests.cpp +++ b/src/armnn/test/optimizations/FuseActivationTests.cpp @@ -8,8 +8,8 @@ #include #include #include -#include "test/GraphUtils.hpp" -#include +#include +#include #include diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp index 
0e969c1a5c..70cffea2b2 100644 --- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp +++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp index 03d0d22f95..523ffcf44f 100644 --- a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp +++ b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp index 38a65a6173..152e79925b 100644 --- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp +++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp index 68d277a4bd..09bf9ae7d9 100644 --- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp +++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp index 694b103091..599b44aa3e 100644 --- a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp +++ b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp index 4b6dfe582b..1e03140b38 100644 --- a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp +++ b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp index 98c84d4fc2..cfd1a23411 100644 --- a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp +++ b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp index f862315220..d87d3f08b5 100644 --- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp +++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include #include diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp index fdd0a6ddd3..b143078e67 100644 --- a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp +++ b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp 
b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp index 46b06a55c7..b3f9ed8780 100644 --- a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp +++ b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp index 692f371356..cf1dfa0d10 100644 --- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp +++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp @@ -3,8 +3,8 @@ // SPDX-License-Identifier: MIT // -#include "../GraphUtils.hpp" -#include "../TestUtils.hpp" +#include +#include #include diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp index 069d28457e..e66bb75b36 100644 --- a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp +++ b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp index 5d1d950573..371f3acadd 100644 --- a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp +++ b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "../TestUtils.hpp" +#include #include diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp index f4600596c8..21809eb0f1 100644 --- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp +++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp @@ -6,7 +6,7 @@ #pragma once #include "SchemaSerialize.hpp" -#include "test/TensorHelpers.hpp" +#include "TensorHelpers.hpp" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" diff --git a/src/armnnTestUtils/CMakeLists.txt b/src/armnnTestUtils/CMakeLists.txt new file mode 100755 index 0000000000..3738fad033 --- /dev/null +++ b/src/armnnTestUtils/CMakeLists.txt @@ -0,0 +1,50 @@ +# +# Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +# SPDX-License-Identifier: MIT +# + +# armnnTestUtils library provides useful test functions for backend developers. 
+set(armnnTestUtils_sources) +list(APPEND armnnTestUtils_sources + ../../include/armnnTestUtils/DataLayoutUtils.hpp + ../../include/armnnTestUtils/LayerTestResult.hpp + ../../include/armnnTestUtils/PredicateResult.hpp + ../../include/armnnTestUtils/TensorCopyUtils.hpp + TensorHelpers.hpp + CreateWorkload.hpp + CommonTestUtils.cpp + CommonTestUtils.hpp + DataTypeUtils.hpp + GraphUtils.cpp + GraphUtils.hpp + TensorCopyUtils.cpp + TestUtils.cpp + TestUtils.hpp + UnitTests.cpp + UnitTests.hpp + WorkloadTestUtils.hpp + ) + +add_library_ex(armnnTestUtils SHARED ${armnnTestUtils_sources}) + +set_target_properties(armnnTestUtils PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}) + +target_include_directories(armnnTestUtils + PUBLIC + $ + $ + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src) + +target_include_directories(armnnTestUtils PRIVATE ../armnn) +target_include_directories(armnnTestUtils PRIVATE ../armnnUtils) +target_include_directories(armnnTestUtils PRIVATE ../backends) +target_include_directories(armnnTestUtils PRIVATE ../profiling) + +install(TARGETS armnnTestUtils + EXPORT armnn-targets + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) + +add_library(Armnn::armnnTestUtils ALIAS armnnTestUtils) \ No newline at end of file diff --git a/src/armnnTestUtils/CommonTestUtils.cpp b/src/armnnTestUtils/CommonTestUtils.cpp new file mode 100644 index 0000000000..c85330577d --- /dev/null +++ b/src/armnnTestUtils/CommonTestUtils.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "CommonTestUtils.hpp" + +#include + +using namespace armnn; + +SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers) +{ + SubgraphView::InputSlots result; + for (auto&& layer : layers) + { + for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it) + { + result.push_back(&(*it)); + } + } + return result; +} + +SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers) +{ + SubgraphView::OutputSlots result; + for (auto && layer : layers) + { + for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it) + { + result.push_back(&(*it)); + } + } + return result; +} + +SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::InputSlots&& inputs, + SubgraphView::OutputSlots&& outputs, + SubgraphView::Layers&& layers) +{ + return std::make_unique(std::move(inputs), std::move(outputs), std::move(layers)); +} + +armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId) +{ + auto& backendRegistry = BackendRegistryInstance(); + auto backendFactory = backendRegistry.GetFactory(backendId); + auto backendObjPtr = backendFactory(); + + return backendObjPtr; +} + +armnn::TensorShape MakeTensorShape(unsigned int batches, + unsigned int channels, + unsigned int height, + unsigned int width, + armnn::DataLayout layout) +{ + using namespace armnn; + switch (layout) + { + case DataLayout::NCHW: + return TensorShape{ batches, channels, height, width }; + case DataLayout::NHWC: + return TensorShape{ batches, height, width, channels }; + default: + throw InvalidArgumentException(std::string("Unsupported data layout: ") + GetDataLayoutName(layout)); + } +} diff --git a/src/armnnTestUtils/CommonTestUtils.hpp b/src/armnnTestUtils/CommonTestUtils.hpp new file mode 100644 index 0000000000..a4babc5568 --- /dev/null +++ b/src/armnnTestUtils/CommonTestUtils.hpp @@ -0,0 
+1,119 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "TestUtils.hpp" + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include + +// Checks that two collections have the exact same contents (in any order) +// The given collections do not have to contain duplicates +// Cannot use std::sort here because std lists have their own std::list::sort method +template +bool AreEqual(const CollectionType& lhs, const CollectionType& rhs) +{ + if (lhs.size() != rhs.size()) + { + return false; + } + + auto lhs_it = std::find_if(lhs.begin(), lhs.end(), [&rhs](auto& item) + { + return std::find(rhs.begin(), rhs.end(), item) == rhs.end(); + }); + + return lhs_it == lhs.end(); +} + +// Checks that the given collection contains the specified item +template +bool Contains(const CollectionType& collection, const typename CollectionType::value_type& item) +{ + return std::find(collection.begin(), collection.end(), item) != collection.end(); +} + +// Checks that the given map contains the specified key +template +bool Contains(const MapType& map, const typename MapType::key_type& key) +{ + return map.find(key) != map.end(); +} + +// Utility template for comparing tensor elements +template> +inline bool Compare(T a, T b, float tolerance = 0.000001f) +{ + if (ArmnnType == armnn::DataType::Boolean) + { + // NOTE: Boolean is represented as uint8_t (with zero equals + // false and everything else equals true), therefore values + // need to be casted to bool before comparing them + return static_cast(a) == static_cast(b); + } + + // NOTE: All other types can be cast to float and compared with + // a certain level of tolerance + return std::fabs(static_cast(a) - static_cast(b)) <= tolerance; +} + +template +void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo) +{ + layer->m_Weight = std::make_unique(weightInfo); + layer->m_Bias = std::make_unique(biasInfo); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); +} + +armnn::SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers); + +armnn::SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers); + +armnn::SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(armnn::SubgraphView::InputSlots&& inputs, + armnn::SubgraphView::OutputSlots&& outputs, + armnn::SubgraphView::Layers&& layers); + +armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId); + +armnn::TensorShape MakeTensorShape(unsigned int batches, + unsigned int channels, + unsigned int height, + unsigned int width, + armnn::DataLayout layout); + +template +static std::vector GenerateRandomData(size_t size) +{ + constexpr bool isIntegerType = std::is_integral::value; + using Distribution = + typename std::conditional, + std::uniform_real_distribution>::type; + + static constexpr DataType lowerLimit = std::numeric_limits::min(); + static constexpr DataType upperLimit = std::numeric_limits::max(); + + static Distribution distribution(lowerLimit, upperLimit); + static std::default_random_engine generator; + + std::vector randomData(size); + generate(randomData.begin(), randomData.end(), []() { return distribution(generator); }); + + return randomData; +} diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp new file mode 100644 index 0000000000..ea8a436177 --- /dev/null +++ 
b/src/armnnTestUtils/CreateWorkload.hpp @@ -0,0 +1,2316 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "TestUtils.hpp" + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +using namespace armnn; + +namespace +{ + +using namespace std; + +// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type. +template +std::unique_ptr MakeAndCheckWorkload(Layer& layer, + const IWorkloadFactory& factory, + const ModelOptions& modelOptions = {}) +{ + std::unique_ptr workload = layer.CreateWorkload(factory); + CHECK_MESSAGE(workload.get() == PolymorphicDowncast(workload.get()), + "Cannot convert to derived class"); + std::string reasonIfUnsupported; + layer.SetBackendId(factory.GetBackendId()); + CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions)); + return std::unique_ptr(static_cast(workload.release())); +} + +// Helper function to create tensor handlers for workloads, assuming they all use the same factory. +void CreateTensorHandles(armnn::Graph& graph, + armnn::IWorkloadFactory& factory) +{ + TensorHandleFactoryRegistry tmpRegistry; + for (auto&& layer : graph.TopologicalSort()) + { + layer->CreateTensorHandles(tmpRegistry, factory); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////// +// The following functions are called by backendsCommon/test/CreateWorkload*.cpp +// They build very simple graphs, and then create a workload. +// Some checks are performed on the workload to ensure parameters have been passed correctly. +// They return the created workloads so that backend-specific checks can be performed. +///////////////////////////////////////////////////////////////////////////////////////////// + +template +std::unique_ptr CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + ActivationDescriptor layerDesc; + layerDesc.m_Function = ActivationFunction::Abs; + layerDesc.m_A = 3.5f; + layerDesc.m_B = -10.0f; + + ActivationLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo({1, 1}, DataType); + + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + ActivationQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK(queueDescriptor.m_Parameters.m_A == 3.5f); + CHECK(queueDescriptor.m_Parameters.m_B == -10.0f); + CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs)); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateElementwiseWorkloadTest(armnn::IWorkloadFactory & factory, + armnn::Graph & graph) +{ + // Creates the layer we're testing. + Layer* const layer = graph.AddLayer("layer"); + + // Creates extra layers. + Layer* const input1 = graph.AddLayer(1, "input1"); + Layer* const input2 = graph.AddLayer(2, "input2"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. 
+ armnn::TensorInfo tensorInfo({2, 3}, DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + DescriptorType queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + SubtractionLayer* const layer = graph.AddLayer("layer"); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Creates extra layers. + Layer* const input1 = graph.AddLayer(1, "input1"); + Layer* const input2 = graph.AddLayer(2, "input2"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo({2, 3}, DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Check that the additional information can be queried from the layer + std::shared_ptr + activationDescPtr = layer->GetAdditionalInformation(); + + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + DescriptorType queueDescriptor = workload->GetData(); + + const ActivationDescriptor* queueDescBlobPtr = + queueDescriptor.template GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + return workload; +} + +template +std::unique_ptr CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + MultiplicationLayer* const layer = graph.AddLayer("layer"); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Creates extra layers. + Layer* const input1 = graph.AddLayer(1, "input1"); + Layer* const input2 = graph.AddLayer(2, "input2"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. 
+ armnn::TensorInfo tensorInfo({2, 3}, DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Check that the additional information can be queried from the layer + std::shared_ptr + activationDescPtr = layer->GetAdditionalInformation(); + + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + DescriptorType queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + const ActivationDescriptor* queueDescBlobPtr = + queueDescriptor.template GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + return workload;// Returns so we can do extra, backend-specific tests. +} + +template +std::unique_ptr CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + AdditionLayer* const layer = graph.AddLayer("layer"); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Creates extra layers. + Layer* const input1 = graph.AddLayer(1, "input1"); + Layer* const input2 = graph.AddLayer(2, "input2"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo({2, 3}, DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Check that the additional information can be queried from the layer + std::shared_ptr + activationDescPtr = layer->template GetAdditionalInformation(); + + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + // Makes the workload and checks it. 
+ auto workload = MakeAndCheckWorkload(*layer, factory); + + DescriptorType queueDescriptor = workload->GetData(); + const ActivationDescriptor* queueDescBlobPtr = + queueDescriptor.template GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + return workload; +} + +template +std::unique_ptr CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory, + armnn::Graph & graph, + armnn::UnaryOperation op) +{ + ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op); + Layer* const layer = graph.AddLayer(desc, "layer"); + + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + armnn::TensorInfo tensorInfo({ 2, 3 }, DataType); + Connect(input, layer, tensorInfo, 0, 0); + Connect(layer, output, tensorInfo, 0, 0); + CreateTensorHandles(graph, factory); + + auto workload = MakeAndCheckWorkload(*layer, factory); + DescriptorType queueDescriptor = workload->GetData(); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + return workload; +} + +template +std::unique_ptr CreateBatchNormalizationWorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) +{ + TensorShape tensorShape; + switch (dataLayout) + { + case DataLayout::NHWC: + tensorShape = { 2, 4, 4, 3 }; + break; + case DataLayout::NCHW: + default: + tensorShape = { 2, 3, 4, 4 }; + } + + // Creates the layer we're testing. + BatchNormalizationDescriptor layerDesc; + layerDesc.m_Eps = 0.05f; + layerDesc.m_DataLayout = dataLayout; + + BatchNormalizationLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + armnn::TensorInfo weightInfo({3}, DataType); + layer->m_Mean = std::make_unique(weightInfo); + layer->m_Variance = std::make_unique(weightInfo); + layer->m_Beta = std::make_unique(weightInfo); + layer->m_Gamma = std::make_unique(weightInfo); + layer->m_Mean->Allocate(); + layer->m_Variance->Allocate(); + layer->m_Beta->Allocate(); + layer->m_Gamma->Allocate(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo(tensorShape, DataType); + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + // Returns so we can do extra, backend-specific tests. 
+ return workload; +} + +template +std::unique_ptr CreateBatchNormalizationWithBlobWorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) +{ + TensorShape tensorShape; + switch (dataLayout) + { + case DataLayout::NHWC: + tensorShape = { 2, 4, 4, 3 }; + break; + case DataLayout::NCHW: + default: + tensorShape = { 2, 3, 4, 4 }; + } + + // Creates the layer we're testing. + BatchNormalizationDescriptor layerDesc; + layerDesc.m_Eps = 0.05f; + layerDesc.m_DataLayout = dataLayout; + + BatchNormalizationLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + armnn::TensorInfo weightInfo({3}, DataType); + layer->m_Mean = std::make_unique(weightInfo); + layer->m_Variance = std::make_unique(weightInfo); + layer->m_Beta = std::make_unique(weightInfo); + layer->m_Gamma = std::make_unique(weightInfo); + layer->m_Mean->Allocate(); + layer->m_Variance->Allocate(); + layer->m_Beta->Allocate(); + layer->m_Gamma->Allocate(); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Check that the additional information can be queried from the layer + std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo(tensorShape, DataType); + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType))); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW, + const ModelOptions& modelOptions = {}) +{ + // Creates the layer we're testing. 
+ Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + layerDesc.m_DataLayout = dataLayout; + + Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3}; + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; + + layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); + layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + Connect(input, layer, TensorInfo(inputShape, DataType)); + Connect(layer, output, TensorInfo(outputShape, DataType)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 4); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 3); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); + CHECK((queueDescriptor.m_Bias->GetTensorInfo() == + TensorInfo({2}, GetBiasDataType(DataType)))); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConvolution2dFusedActivationWithBlobWorkloadTest( + armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW, + const ModelOptions& modelOptions = {}) +{ + // Creates the layer we're testing. + Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + layerDesc.m_DataLayout = dataLayout; + + + Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3}; + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3}; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? 
TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2}; + + layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); + layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Check that the additional information can be queried from the layer + std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); + + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + Connect(input, layer, TensorInfo(inputShape, DataType)); + Connect(layer, output, TensorInfo(outputShape, DataType)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 4); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 3); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); + CHECK((queueDescriptor.m_Bias->GetTensorInfo() == + TensorInfo({2}, GetBiasDataType(DataType)))); + CHECK(queueDescriptor.m_Inputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW, + const ModelOptions& modelOptions = {}) +{ + // Creates the layer we're testing. 
+ Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 0; + layerDesc.m_PadRight = 0; + layerDesc.m_PadTop = 0; + layerDesc.m_PadBottom = 0; + layerDesc.m_StrideX = 1; + layerDesc.m_StrideY = 1; + layerDesc.m_BiasEnabled = false; + layerDesc.m_DataLayout = dataLayout; + + Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + TensorShape weightShape = TensorShape{32, 32, 3, 3}; + TensorShape inputShape = TensorShape{1, 32, 149, 149}; + TensorShape outputShape = TensorShape{1, 32, 147, 147}; + + layer->m_Weight = std::make_unique(TensorInfo(weightShape, DataType)); + layer->m_Bias = std::make_unique(TensorInfo({2}, GetBiasDataType(DataType))); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + Connect(input, layer, TensorInfo(inputShape, DataType)); + Connect(layer, output, TensorInfo(outputShape, DataType)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory, modelOptions); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 0); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 0); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType))); + + // Returns so we can do extra, backend-specific tests. 
+ return workload; +} + +template +std::unique_ptr CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // This parameter setting is for withCifgWithPeepholeNoProjection + LstmDescriptor layerDesc; + layerDesc.m_ActivationFunc = 4; + layerDesc.m_ClippingThresCell = 0.0f; + layerDesc.m_ClippingThresProj = 0.0f; + layerDesc.m_CifgEnabled = true; + layerDesc.m_PeepholeEnabled = true; + layerDesc.m_ProjectionEnabled = false; + + LstmLayer* const layer = graph.AddLayer(layerDesc, "layer"); + unsigned int batchSize = 2; + unsigned int inputSize = 2; + unsigned int numUnits = 4; + unsigned int outputSize = 4; + + layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique + (TensorInfo({ numUnits, inputSize }, DataType::Float32)); + layer->m_BasicParameters.m_InputToCellWeights = std::make_unique + (TensorInfo({ numUnits, inputSize }, DataType::Float32)); + layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique + (TensorInfo({ numUnits, inputSize }, DataType::Float32)); + layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique + (TensorInfo({ numUnits, outputSize }, DataType::Float32)); + layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique + (TensorInfo({ numUnits, outputSize }, DataType::Float32)); + layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique + (TensorInfo({ numUnits, outputSize }, DataType::Float32)); + layer->m_BasicParameters.m_ForgetGateBias = std::make_unique + (TensorInfo({ numUnits }, DataType::Float32)); + layer->m_BasicParameters.m_CellBias = std::make_unique + (TensorInfo({ numUnits }, DataType::Float32)); + layer->m_BasicParameters.m_OutputGateBias = std::make_unique + (TensorInfo({ numUnits }, DataType::Float32)); + + layer->m_BasicParameters.m_InputToForgetWeights->Allocate(); + layer->m_BasicParameters.m_InputToCellWeights->Allocate(); + layer->m_BasicParameters.m_InputToOutputWeights->Allocate(); + layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate(); + layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate(); + layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate(); + layer->m_BasicParameters.m_ForgetGateBias->Allocate(); + layer->m_BasicParameters.m_CellBias->Allocate(); + layer->m_BasicParameters.m_OutputGateBias->Allocate(); + + + if (layerDesc.m_PeepholeEnabled) + { + layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique + (TensorInfo({ numUnits }, DataType::Float32)); + layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique + (TensorInfo({ numUnits }, DataType::Float32)); + layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate(); + layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate(); + } + + // create input and output layers + Layer* const input = graph.AddLayer(0, "input"); + Layer* const outputStateIn = graph.AddLayer(1, "outputStateIn"); + Layer* const cellStateIn = graph.AddLayer(2, "cellStateIn"); + Layer* const scratchBuffer = graph.AddLayer(0, "scratchBuffer"); + Layer* const outputStateOut = graph.AddLayer(1, "outputStateOut"); + Layer* const cellStateOut = graph.AddLayer(2, "cellStateOut"); + Layer* const output = graph.AddLayer(3, "output"); + + // connect up + armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32); + armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32); + armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32); + armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 
(layerDesc.m_CifgEnabled ? 3 : 4) }, + DataType::Float32); + Connect(input, layer, lstmTensorInfo1, 0, 0); + Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1); + Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2); + Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0); + Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0); + Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0); + Connect(layer, output, lstmTensorInfo3, 3, 0); + + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload(*layer, factory); + LstmQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4); + CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f); + CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f); + CHECK(queueDescriptor.m_Inputs.size() == 3); + CHECK(queueDescriptor.m_Outputs.size() == 4); + + CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize }, + DataType::Float32))); + CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits }, + DataType::Float32))); + CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32))); + return workload; +} + +template +std::unique_ptr CreateQuantizedLstmWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + auto layer = graph.AddLayer("quantizedLstmlayer"); + unsigned int numBatches = 2; + unsigned int inputSize = 2; + unsigned int outputSize = 4; + + // Scale/Offset for input/output, cellState In/Out, weights, bias + float inputOutputScale = 0.0078125f; + int32_t inputOutputOffset = 128; + + float cellStateScale = 0.00048828125f; + int32_t cellStateOffset = 0; + + float weightsScale = 0.00408021f; + int32_t weightsOffset = 100; + + float biasScale = 3.1876640625e-05f; + int32_t biasOffset = 0; + + // Weights and bias tensor and quantization info + armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, + armnn::DataType::QAsymmU8, + weightsScale, + weightsOffset); + + armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, + armnn::DataType::QAsymmU8, + weightsScale, + weightsOffset); + + armnn::TensorInfo biasInfo({outputSize}, + armnn::DataType::Signed32, + biasScale, + biasOffset); + + // Weights and bias + layer->m_QuantizedLstmParameters.m_InputToInputWeights = + std::make_unique(inputWeightsInfo); + layer->m_QuantizedLstmParameters.m_InputToForgetWeights = + std::make_unique(inputWeightsInfo); + layer->m_QuantizedLstmParameters.m_InputToCellWeights = + std::make_unique(inputWeightsInfo); + layer->m_QuantizedLstmParameters.m_InputToOutputWeights = + std::make_unique(inputWeightsInfo); + + layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = + std::make_unique(recurrentWeightsInfo); + layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = + std::make_unique(recurrentWeightsInfo); + layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = + std::make_unique(recurrentWeightsInfo); + layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = + std::make_unique(recurrentWeightsInfo); + + layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique(biasInfo); + layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique(biasInfo); + layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique(biasInfo); + layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique(biasInfo); + + // Allocate weights and bias + 
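+    // Clarifying note: the contents of these parameter tensors are left uninitialised on purpose;
+    // the assertions in this test only compare tensor metadata (shape, data type and quantisation
+    // info), so Allocate() is called purely to give the workload valid backing storage.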
layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_InputToForgetWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_InputToCellWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_InputToOutputWeights->Allocate(); + + layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->Allocate(); + layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Allocate(); + + layer->m_QuantizedLstmParameters.m_InputGateBias->Allocate(); + layer->m_QuantizedLstmParameters.m_ForgetGateBias->Allocate(); + layer->m_QuantizedLstmParameters.m_CellBias->Allocate(); + layer->m_QuantizedLstmParameters.m_OutputGateBias->Allocate(); + + // Create input and output layers + Layer* const input = graph.AddLayer(0, "input"); + Layer* const cellStateIn = graph.AddLayer(1, "cellStateIn"); + Layer* const outputStateIn = graph.AddLayer(2, "outputStateIn"); + + Layer* const cellStateOut = graph.AddLayer(0, "cellStateOut"); + Layer* const outputStateOut = graph.AddLayer(1, "outputStateOut"); + + // Input/output tensor info and quantization info + armnn::TensorInfo inputInfo({numBatches , inputSize}, + armnn::DataType::QAsymmU8, + inputOutputScale, + inputOutputOffset); + + armnn::TensorInfo cellStateInfo({numBatches , outputSize}, + armnn::DataType::QSymmS16, + cellStateScale, + cellStateOffset); + + armnn::TensorInfo outputStateInfo({numBatches , outputSize}, + armnn::DataType::QAsymmU8, + inputOutputScale, + inputOutputOffset); + + // Connect input/output slots + Connect(input, layer, inputInfo, 0, 0); + Connect(cellStateIn, layer, cellStateInfo, 0, 1); + Connect(outputStateIn, layer, outputStateInfo, 0, 2); + + Connect(layer, cellStateOut, cellStateInfo, 0, 0); + Connect(layer, outputStateOut, outputStateInfo, 1, 0); + + CreateTensorHandles(graph, factory); + + // Create workload and check layer support + auto workload = MakeAndCheckWorkload(*layer, factory); + QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData(); + + // Validate input/output sizes + CHECK(queueDescriptor.m_Inputs.size() == 3); + CHECK(queueDescriptor.m_Outputs.size() == 2); + + // Validate weight tensor info + CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo)); + CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo)); + CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo)); + CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo)); + + CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo)); + CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo)); + CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo)); + CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo)); + + CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo)); + CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo)); + CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo)); + CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo)); + + return workload; +} + +template +std::unique_ptr CreateQLstmWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + QLstmDescriptor layerDesc; + layerDesc.m_CifgEnabled = true; + 
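+    // Note: CIFG ("coupled input and forget gate") derives the input gate from the forget gate,
+    // which is why no input-gate weights or bias are created for this layer below.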
layerDesc.m_PeepholeEnabled = false; + layerDesc.m_ProjectionEnabled = false; + layerDesc.m_LayerNormEnabled = true; + + layerDesc.m_CellClip = 0.0f; + layerDesc.m_ProjectionClip = 0.0f; + + layerDesc.m_HiddenStateZeroPoint = 0; + layerDesc.m_HiddenStateScale = 0.007f; + + layerDesc.m_InputIntermediateScale = 0.007059f; + layerDesc.m_ForgetIntermediateScale = 0.007812f; + layerDesc.m_CellIntermediateScale = 0.007059f; + layerDesc.m_OutputIntermediateScale = 0.007812f; + + QLstmLayer* const layer = graph.AddLayer(layerDesc, "qLstm"); + + unsigned int numBatches = 2; + unsigned int inputSize = 4; + unsigned int numUnits = 4; + unsigned int outputSize = 4; + + // Scale/Offset quantization info + float inputScale = 0.0078125f; + int32_t inputOffset = 0; + + // if (!projectionEnabled) outputScale == hiddenStateScale + float outputScale = layerDesc.m_HiddenStateScale; + int32_t outputOffset = layerDesc.m_HiddenStateZeroPoint; + + float cellStateScale = 3.05176e-05f; + int32_t cellStateOffset = 0; + + float weightsScale = 0.00784314f; + int32_t weightsOffset = 0; + + float layerNormScale = 3.05182e-05f; + int32_t layerNormOffset = 0; + + float biasScale = layerNormScale / 1024; + int32_t biasOffset = 0; + + // Weights and bias tensor and quantization info + armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, + armnn::DataType::QSymmS8, + weightsScale, + weightsOffset); + + armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, + armnn::DataType::QSymmS8, + weightsScale, + weightsOffset); + + armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset); + + armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset); + + // Create and allocate tensors + layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique(inputWeightsInfo); + layer->m_BasicParameters.m_InputToCellWeights = std::make_unique(inputWeightsInfo); + layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique(inputWeightsInfo); + + layer->m_BasicParameters.m_RecurrentToForgetWeights = + std::make_unique(recurrentWeightsInfo); + layer->m_BasicParameters.m_RecurrentToCellWeights = + std::make_unique(recurrentWeightsInfo); + layer->m_BasicParameters.m_RecurrentToOutputWeights = + std::make_unique(recurrentWeightsInfo); + + layer->m_BasicParameters.m_ForgetGateBias = std::make_unique(biasInfo); + layer->m_BasicParameters.m_CellBias = std::make_unique(biasInfo); + layer->m_BasicParameters.m_OutputGateBias = std::make_unique(biasInfo); + + layer->m_LayerNormParameters.m_ForgetLayerNormWeights = + std::make_unique(layerNormWeightsInfo); + layer->m_LayerNormParameters.m_CellLayerNormWeights = + std::make_unique(layerNormWeightsInfo); + layer->m_LayerNormParameters.m_OutputLayerNormWeights = + std::make_unique(layerNormWeightsInfo); + + layer->m_BasicParameters.m_InputToForgetWeights->Allocate(); + layer->m_BasicParameters.m_InputToCellWeights->Allocate(); + layer->m_BasicParameters.m_InputToOutputWeights->Allocate(); + + layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate(); + layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate(); + layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate(); + + layer->m_BasicParameters.m_ForgetGateBias->Allocate(); + layer->m_BasicParameters.m_CellBias->Allocate(); + layer->m_BasicParameters.m_OutputGateBias->Allocate(); + + layer->m_LayerNormParameters.m_ForgetLayerNormWeights->Allocate(); + layer->m_LayerNormParameters.m_CellLayerNormWeights->Allocate(); + 
layer->m_LayerNormParameters.m_OutputLayerNormWeights->Allocate(); + + // Input and output layers + Layer* const input = graph.AddLayer(0, "input"); + Layer* const outputStateIn = graph.AddLayer(1, "outputStateIn"); + Layer* const cellStateIn = graph.AddLayer(2, "cellStateIn"); + + Layer* const outputStateOut = graph.AddLayer(0, "outputStateOut"); + Layer* const cellStateOut = graph.AddLayer(1, "cellStateOut"); + Layer* const output = graph.AddLayer(2, "output"); + + // Input/Output tensor info + armnn::TensorInfo inputInfo({numBatches , inputSize}, + armnn::DataType::QAsymmS8, + inputScale, + inputOffset); + + armnn::TensorInfo cellStateInfo({numBatches , numUnits}, + armnn::DataType::QSymmS16, + cellStateScale, + cellStateOffset); + + armnn::TensorInfo outputStateInfo({numBatches , outputSize}, + armnn::DataType::QAsymmS8, + outputScale, + outputOffset); + + // Connect layers to slots + Connect(input, layer, inputInfo, 0, 0); + Connect(outputStateIn, layer, outputStateInfo, 0, 1); + Connect(cellStateIn, layer, cellStateInfo, 0, 2); + + Connect(layer, outputStateOut, outputStateInfo, 0, 0); + Connect(layer, cellStateOut, cellStateInfo, 1, 0); + Connect(layer, output, outputStateInfo, 2, 0); + + CreateTensorHandles(graph, factory); + + // Create and check workload + auto workload = MakeAndCheckWorkload(*layer, factory); + QLstmQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f); + CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f); + CHECK(queueDescriptor.m_Inputs.size() == 3); + CHECK(queueDescriptor.m_Outputs.size() == 3); + + CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo)); + CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo)); + CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo)); + + CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo)); + CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo)); + CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo)); + + CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo)); + CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo)); + CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo)); + + return workload; +} + +template +std::unique_ptr CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 1; + layerDesc.m_PadRight = 1; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 1; + layerDesc.m_StrideY = 1; + layerDesc.m_BiasEnabled = true; + + Convolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; + + layer->m_Weight = std::make_unique(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale)); + layer->m_Bias = std::make_unique + (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. 
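+    // Shape note (NCHW, the descriptor default here): a 6x6 input with a 3x3 kernel, stride 1 and
+    // padding 1 on every side keeps the spatial size at (6 + 1 + 1 - 3) + 1 = 6, and the 2 output
+    // channels come from the {2, 3, 3, 3} (OIHW) weights, giving the {2, 2, 6, 6} output below.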
+ Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale)); + Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 1); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3}, + DataType, inputsQScale))); + CHECK((queueDescriptor.m_Bias->GetTensorInfo() + == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale))); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateDepthwiseConvolution2dWorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) +{ + // Creates the layer we're testing. + DepthwiseConvolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 1; + layerDesc.m_PadRight = 2; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 2; + layerDesc.m_StrideX = 1; + layerDesc.m_StrideY = 1; + layerDesc.m_BiasEnabled = false; + layerDesc.m_DataLayout = dataLayout; + + DepthwiseConvolution2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + layer->m_Weight = std::make_unique(TensorInfo({1, 4, 4, 2}, DataType)); // [ 1, H, W, I*M ] + layer->m_Weight->Allocate(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 }; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 }; + + // Connects up. + Connect(input, layer, TensorInfo(inputShape, DataType)); + Connect(layer, output, TensorInfo(outputShape, DataType)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_StrideX == 1); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 1); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 2); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2); + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType))); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. 
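+    // Note: with m_TransposeWeightMatrix set, the weights below are stored as
+    // {outputUnits, inputUnits} = {7, 20}; the {3, 1, 4, 5} input flattens to {3, 20},
+    // so the fully connected output is {3, 7}.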
+ FullyConnectedDescriptor layerDesc; + layerDesc.m_BiasEnabled = false; + layerDesc.m_TransposeWeightMatrix = true; + + FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; + + // As optimization isn't run member variables need to be updated. + layer->m_Weight = std::make_unique(TensorInfo({7, 20}, DataType, inputsQScale, 0)); + layer->m_Weight->Allocate(); + + armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); + weightsTensorInfo.SetConstant(); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + auto const weights = graph.AddLayer("weights"); + Layer* const output = graph.AddLayer(0, "output"); + + weights->m_LayerOutput = std::make_unique(weightsTensorInfo); + weights->m_LayerOutput->Allocate(); + + // Connects up. + Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); + Connect(weights, layer, weightsTensorInfo, 0, 1); + Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); + + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateFullyConnectedWithBlobWorkloadTest + (armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + FullyConnectedDescriptor layerDesc; + layerDesc.m_BiasEnabled = true; + layerDesc.m_TransposeWeightMatrix = true; + + FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; + + // As optimization isn't run member variables need to be updated. + layer->m_Weight = std::make_unique(TensorInfo({7, 20}, DataType, inputsQScale, 0)); + layer->m_Bias = std::make_unique(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale); + armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale); + weightsTensorInfo.SetConstant(); + biasesTensorInfo.SetConstant(); + + auto activationDesc = std::make_shared(); + activationDesc->m_A = 10.0f; + activationDesc->m_B = 5.0f; + activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu; + + layer->SetAdditionalInfoForObject(activationDesc); + + // Check that the additional information can be queried from the layer + std::shared_ptr activationDescPtr = layer->GetAdditionalInformation(); + ARMNN_ASSERT(static_cast(activationDescPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_B) == 5.0f); + ARMNN_ASSERT(static_cast(activationDescPtr->m_Function) == + armnn::ActivationFunction::BoundedReLu); + + // Creates extra layers. 
+ Layer* const input = graph.AddLayer(0, "input"); + auto const weights = graph.AddLayer("weights"); + auto const biases = graph.AddLayer("biases"); + Layer* const output = graph.AddLayer(0, "output"); + + weights->m_LayerOutput = std::make_unique(weightsTensorInfo); + weights->m_LayerOutput->Allocate(); + biases->m_LayerOutput = std::make_unique(biasesTensorInfo); + biases->m_LayerOutput->Allocate(); + + // Connects up. + Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); + Connect(weights, layer, weightsTensorInfo, 0, 1); + Connect(biases, layer, biasesTensorInfo, 0, 2); + Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + + const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation(); + IgnoreUnused(queueDescBlobPtr); + + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_A) == 10.0f); + ARMNN_ASSERT(static_cast(queueDescBlobPtr->m_B) == 5.0f); + ARMNN_ASSERT( + static_cast(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu + ); + + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); + CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); + CHECK(queueDescriptor.m_Inputs.size() == 3); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest + (armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + FullyConnectedDescriptor layerDesc; + layerDesc.m_BiasEnabled = true; + layerDesc.m_TransposeWeightMatrix = true; + layerDesc.m_ConstantWeights = false; + + FullyConnectedLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; + + // Creates extra layers with weights and biases as input layers. + Layer* const input = graph.AddLayer(1, "input"); + Layer* const weights = graph.AddLayer(2, "weights"); + Layer* const biases = graph.AddLayer(3, "biases"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0); + Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1); + Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2); + Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + + CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true); + CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); + CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false); + CHECK(queueDescriptor.m_Inputs.size() == 3); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + + +template +std::unique_ptr CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW) +{ + // Creates the layer we're testing. 
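+    // Note: Across + LocalBrightness is the classic cross-channel LRN configuration; the checks
+    // below only verify that these descriptor values are copied verbatim into the queue
+    // descriptor, not the numerical result of the normalization.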
+ NormalizationDescriptor layerDesc; + layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across; + layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness; + layerDesc.m_NormSize = 3; + layerDesc.m_Alpha = 0.5f; + layerDesc.m_Beta = -1.0f; + layerDesc.m_K = 0.2f; + layerDesc.m_DataLayout = dataLayout; + + NormalizationLayer* layer = graph.AddLayer(layerDesc, "layer"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 }; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 }; + + // Connects up. + armnn::TensorInfo inputTensorInfo(inputShape, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across)); + CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness)); + CHECK(queueDescriptor.m_Parameters.m_NormSize == 3); + CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f); + CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f); + CHECK(queueDescriptor.m_Parameters.m_K == 0.2f); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW) +{ + // Creates the layer we're testing. + Pooling2dDescriptor layerDesc; + layerDesc.m_PoolType = PoolingAlgorithm::Average; + layerDesc.m_PoolWidth = 3; + layerDesc.m_PoolHeight = 3; + layerDesc.m_PadLeft = 2; + layerDesc.m_PadRight = 2; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 3; + layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor; + layerDesc.m_DataLayout = dataLayout; + + Pooling2dLayer* const layer = graph.AddLayer(layerDesc, "layer"); + + // Create extra layers + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2}; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? 
TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2}; + + // Connect up + Connect(input, layer, TensorInfo(inputShape, DataType)); + Connect(layer, output, TensorInfo(outputShape, DataType)); + CreateTensorHandles(graph, factory); + + // Make the workload and checks it + auto workload = MakeAndCheckWorkload(*layer, factory); + + Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average)); + CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor)); + CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3); + CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3); + CHECK(queueDescriptor.m_Parameters.m_StrideX == 2); + CHECK(queueDescriptor.m_Parameters.m_StrideY == 3); + CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2); + CHECK(queueDescriptor.m_Parameters.m_PadRight == 2); + CHECK(queueDescriptor.m_Parameters.m_PadTop == 1); + CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Return so we can do extra, backend-specific tests + return workload; +} + +template +std::unique_ptr CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Create the layer we're testing. + SoftmaxDescriptor softmaxDescriptor; + // Set Axis to -1 if CL or Neon until further Axes are supported. + if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc) + { + softmaxDescriptor.m_Axis = -1; + } + + Layer* const layer = graph.AddLayer(softmaxDescriptor, "layer"); + // Create extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connect up + armnn::TensorInfo tensorInfo({4, 1}, DataType); + if (DataType == armnn::DataType::QAsymmU8) + { + tensorInfo.SetQuantizationOffset(0); + tensorInfo.SetQuantizationScale(1.f / 256); + } + else if (DataType == armnn::DataType::QAsymmS8) + { + tensorInfo.SetQuantizationOffset(-128); + tensorInfo.SetQuantizationScale(1.f / 256); + } + + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Make the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Return so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr + CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // Create the layer we're testing. + // NOTE: need three dimensions channels, height/y, width/x because the Compute + // library restricts subtensors to have the same x and y dimensions as + // their parent tensors, and therefore the origin on the x and y dimension + // has to be zero for any view. So we need a third dimension to split... + // NOTE: arguments are: number of views, number of dimensions. + ViewsDescriptor layerDesc(3, 3); + // NOTE: arguments are: view, dimension, value. + layerDesc.SetViewOriginCoord(0, 0, 0); + layerDesc.SetViewOriginCoord(1, 0, 1); + layerDesc.SetViewOriginCoord(2, 0, 3); + + Layer* const layer = graph.AddLayer(layerDesc, "layer"); + + // Adds extra layers. 
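+    // Note: the splitter's three views start at origins 0, 1 and 3 of the 5-element outermost
+    // dimension, so they are 1, 2 and 2 elements deep respectively, matching output0Info,
+    // output1Info and output2Info below.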
+ Layer* const input = graph.AddLayer(0, "input"); + Layer* const output0 = graph.AddLayer(0, "output0"); + Layer* const output1 = graph.AddLayer(1, "output1"); + Layer* const output2 = graph.AddLayer(2, "output2"); + + // Connects up. + armnn::TensorInfo tensorInfo({5, 7, 7}, DataType); + Connect(input, layer, tensorInfo); + + armnn::TensorInfo output0Info({1, 7, 7}, DataType); + armnn::TensorInfo output1Info({2, 7, 7}, DataType); + armnn::TensorInfo output2Info({2, 7, 7}, DataType); + + Connect(layer, output0, output0Info, 0, 0); + Connect(layer, output1, output1Info, 1, 0); + Connect(layer, output2, output2Info, 2, 0); + + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + SplitterQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 3); + CHECK(queueDescriptor.m_ViewOrigins.size() == 3); + + CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0); + CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1); + CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3); + CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0); + CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0); + CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0); + CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0); + CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0); + CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads. +template +std::pair, std::unique_ptr> + CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph) +{ + armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType); + + armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType); + armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType); + + //Constructs the graph. + Layer* const input = graph.AddLayer(0, "input"); + + armnn::ViewsDescriptor splitterViews(2); + splitterViews.SetViewOriginCoord(0, 0, 0); + splitterViews.SetViewOriginCoord(0, 1, 0); + splitterViews.SetViewOriginCoord(0, 2, 0); + splitterViews.SetViewOriginCoord(0, 3, 0); + + splitterViews.SetViewOriginCoord(1, 0, 0); + splitterViews.SetViewOriginCoord(1, 1, 1); + splitterViews.SetViewOriginCoord(1, 2, 0); + splitterViews.SetViewOriginCoord(1, 3, 0); + + // create splitter layer + Layer* const splitter = graph.AddLayer(splitterViews, "splitter"); + CHECK(splitter); + + armnn::OriginsDescriptor concatViews(2); + concatViews.SetViewOriginCoord(0, 0, 0); + concatViews.SetViewOriginCoord(0, 1, 1); + concatViews.SetViewOriginCoord(0, 2, 0); + concatViews.SetViewOriginCoord(0, 3, 0); + + concatViews.SetViewOriginCoord(1, 0, 0); + concatViews.SetViewOriginCoord(1, 1, 0); + concatViews.SetViewOriginCoord(1, 2, 0); + concatViews.SetViewOriginCoord(1, 3, 0); + + // create concat layer + Layer* const concat = graph.AddLayer(concatViews, "concat"); + CHECK(concat); + + Layer* const output = graph.AddLayer(0, "output"); + + // Adds connections. + // connect input to splitter + Connect(input, splitter, inputTensorInfo, 0, 0); + // connect splitter[0] to concat[1] + Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up. 
+ // connect splitter[1] to concat[0] + Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round. + // connect concat to output + Connect(concat, output, inputTensorInfo, 0, 0); + + // created tensor handles + CreateTensorHandles(graph, factory); + + // created splitter workload + auto workloadSplitter = MakeAndCheckWorkload(*splitter, factory); + CHECK(workloadSplitter); + // created concat workload + auto workloadConcat = MakeAndCheckWorkload(*concat, factory); + CHECK(workloadConcat); + + return {std::move(workloadSplitter), std::move(workloadConcat)}; +} + + +/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then +/// connected to two different activation layers +template +void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph, + std::unique_ptr& wlSplitter, + std::unique_ptr& wlActiv0_0, + std::unique_ptr& wlActiv0_1, + std::unique_ptr& wlActiv1_0, + std::unique_ptr& wlActiv1_1) +{ + armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType); + armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType); + armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType); + + //Constructs the graph. + Layer* const input = graph.AddLayer(0, "input"); + + armnn::ViewsDescriptor splitterViews(2); + + splitterViews.SetViewOriginCoord(0, 0, 0); + splitterViews.SetViewOriginCoord(0, 1, 0); + splitterViews.SetViewOriginCoord(0, 2, 0); + splitterViews.SetViewOriginCoord(0, 3, 0); + + splitterViews.SetViewOriginCoord(1, 0, 0); + splitterViews.SetViewOriginCoord(1, 1, 1); + splitterViews.SetViewOriginCoord(1, 2, 0); + splitterViews.SetViewOriginCoord(1, 3, 0); + + Layer* const splitter = graph.AddLayer(splitterViews, "splitter"); + + armnn::ActivationDescriptor activationDesc; + + Layer* const activ0_0 = graph.AddLayer(activationDesc, "activ0_0"); + Layer* const activ0_1 = graph.AddLayer(activationDesc, "activ0_1"); + Layer* const activ1_0 = graph.AddLayer(activationDesc, "activ1_0"); + Layer* const activ1_1 = graph.AddLayer(activationDesc, "activ1_1"); + + Layer* const output1 = graph.AddLayer(1, "output1"); + Layer* const output2 = graph.AddLayer(2, "output2"); + Layer* const output3 = graph.AddLayer(3, "output3"); + Layer* const output4 = graph.AddLayer(4, "output4"); + + // Adds connections. 
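+    // Note: splitter output 0 (the 1-channel view) feeds activ0_0 and activ0_1, and output 1
+    // (the 2-channel view) feeds activ1_0 and activ1_1, so each output slot has two consumers,
+    // which is the case this test exercises.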
+ Connect(input, splitter, inputTensorInfo, 0, 0); + Connect(splitter, activ0_0, splitTensorInfo1, 0, 0); + Connect(splitter, activ0_1, splitTensorInfo1, 0, 0); + + Connect(splitter, activ1_0, splitTensorInfo2, 1, 0); + Connect(splitter, activ1_1, splitTensorInfo2, 1, 0); + + Connect(activ0_0, output1, splitTensorInfo1, 0, 0); + Connect(activ0_1, output2, splitTensorInfo1, 0, 0); + Connect(activ1_0, output3, splitTensorInfo2, 0, 0); + Connect(activ1_1, output4, splitTensorInfo2, 0, 0); + + CreateTensorHandles(graph, factory); + + auto workloadSplitter = MakeAndCheckWorkload(*splitter, factory); + auto workloadActiv0_0 = MakeAndCheckWorkload(*activ0_0, factory); + auto workloadActiv0_1 = MakeAndCheckWorkload(*activ0_1, factory); + auto workloadActiv1_0 = MakeAndCheckWorkload(*activ1_0, factory); + auto workloadActiv1_1 = MakeAndCheckWorkload(*activ1_1, factory); + + wlSplitter = std::move(workloadSplitter); + wlActiv0_0 = std::move(workloadActiv0_0); + wlActiv0_1 = std::move(workloadActiv0_1); + wlActiv1_0 = std::move(workloadActiv1_0); + wlActiv1_1 = std::move(workloadActiv1_1); +} + +template +std::unique_ptr CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + DataLayout dataLayout = DataLayout::NCHW) +{ + TensorShape inputShape; + TensorShape outputShape; + + switch (dataLayout) { + case DataLayout::NHWC: + inputShape = { 2, 4, 4, 3 }; + outputShape = { 2, 2, 2, 3 }; + break; + case DataLayout::NCHW: + default: + inputShape = { 2, 3, 4, 4 }; + outputShape = { 2, 3, 2, 2 }; + } + + // Creates the layer we're testing. + ResizeDescriptor resizeDesc; + armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout; + resizeDesc.m_Method = ResizeMethod::Bilinear; + resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()]; + resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()]; + resizeDesc.m_DataLayout = dataLayout; + Layer* const layer = graph.AddLayer(resizeDesc, "resize"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo inputTensorInfo(inputShape, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + auto queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + BatchToSpaceNdDescriptor desc; + Layer* const layer = graph.AddLayer(desc, "batchToSpace"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo tensorInfo({1, 1, 1, 1}, DataType); + + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. 
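+    // Note: the descriptor is default-constructed (block shape {1, 1}, no crops), so only the
+    // plumbing is exercised here; the checks below simply count the input and output tensors.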
+ auto workload = MakeAndCheckWorkload(*layer, factory); + + BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + return workload; +} + +template +std::unique_ptr CreateLogSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Create the layer we're testing. + LogSoftmaxDescriptor logSoftmaxDescriptor; + // Set Axis to -1 if CL or Neon until further Axes are supported. + if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc) + { + logSoftmaxDescriptor.m_Axis = -1; + } + + Layer* const layer = graph.AddLayer(logSoftmaxDescriptor, "layer"); + // Create extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connect up + armnn::TensorInfo tensorInfo({4, 1}, DataType); + + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // Make the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Return so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW) +{ + // Creates the layer we're testing. + L2NormalizationDescriptor layerDesc; + layerDesc.m_DataLayout = dataLayout; + + Layer* const layer = graph.AddLayer(layerDesc, "l2norm"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 }; + TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? + TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 }; + + // Connects up. + armnn::TensorInfo inputTensorInfo(inputShape, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout)); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // Creates the layer we're testing. + TensorShape outputShape({ 1, 4 }); + ReshapeDescriptor reshapeDesc; + reshapeDesc.m_TargetShape = outputShape; + Layer* const layer = graph.AddLayer(reshapeDesc, "layer"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. 
+ armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConvertFp16ToFp32WorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // Creates the layer we're testing. + ConvertFp16ToFp32Layer* const layer = graph.AddLayer("Fp16ToFp32Converter"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); + armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConvertFp32ToFp16WorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // Creates the layer we're testing. + ConvertFp32ToFp16Layer* const layer = graph.AddLayer("Fp32ToFp16Converter"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); + armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // Reduce along the first and second dimensions, and do not keep the reduced dimensions. + MeanDescriptor descriptor({ 1, 2 }, false); + + // Creates the layer we're testing. + Layer* const layer = graph.AddLayer(descriptor, "mean"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType); + armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. 
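+    // Note: reducing axes 1 and 2 of the {1, 3, 7, 4} input with keepDims = false drops those
+    // dimensions entirely, which is why the output TensorInfo above is {1, 4}.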
+ auto workload = MakeAndCheckWorkload(*layer, factory); + + MeanQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis); + CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory, + armnn::Graph &graph, + const armnn::TensorShape &outputShape, + unsigned int concatAxis) +{ + armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + + // Constructs the graph. + Layer* const input0 = graph.AddLayer(0, "input0"); + Layer* const input1 = graph.AddLayer(1, "input1"); + armnn::OriginsDescriptor descriptor; + + std::vector inputShapes{{ 2, 3, 2, 5 }, { 2, 3, 2, 5 }}; + + descriptor = CreateDescriptorForConcatenation(inputShapes.begin(), + inputShapes.end(), + concatAxis); + + // create concat layer + Layer* const concat = graph.AddLayer(descriptor, "concat"); + CHECK(concat); + + Layer* const output = graph.AddLayer(0, "output"); + + // Adds connections. + // connect input0 to concat + Connect(input0, concat, inputTensorInfo, 0, 0); + // connect input1 to concat + Connect(input1, concat, inputTensorInfo, 0, 1); + // connect concat to output + Connect(concat, output, outputTensorInfo, 0, 0); + + // create tensor handles + CreateTensorHandles(graph, factory); + + // create concat workload + auto workloadConcat = MakeAndCheckWorkload(*concat, factory); + CHECK(workloadConcat); + + return workloadConcat; +} + +template +std::pair> CreatePreCompiledWorkloadTest( + armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + bool biasEnabled = false) +{ + IgnoreUnused(graph); + + // build up the structure of the network + armnn::INetworkPtr net(armnn::INetwork::Create()); + + // Add an input layer + armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer"); + CHECK(inputLayer); + + // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW + // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC + // this test is using NHWC, so the weights shape is OHWI + TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true); + unsigned int weightsLength = weightsTensorInfo.GetNumElements(); + + using WeightType = armnn::ResolveType; + std::vector convWeightsData(weightsLength); + for (unsigned int i = 0; i < weightsLength; ++i) + { + convWeightsData[i] = static_cast(i); + } + + armnn::ConstTensor weights(weightsTensorInfo, convWeightsData); + + // Add a layer that can be used in the PreCompiled layer + armnn::Convolution2dDescriptor convDesc2d; + convDesc2d.m_StrideX = 1; + convDesc2d.m_StrideY = 1; + convDesc2d.m_BiasEnabled = biasEnabled; + convDesc2d.m_DataLayout = armnn::DataLayout::NHWC; + + armnn::IConnectableLayer* convLayer = nullptr; + const std::string convLayerName("conv layer"); + + if (biasEnabled) + { + constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ? 
+ armnn::DataType::Signed32 : armnn::DataType::Float32; + + TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true); + unsigned int biasLength = biasTensorInfo.GetNumElements(); + + using BiasType = armnn::ResolveType; + std::vector biasData(biasLength); + std::fill(biasData.begin(), biasData.end(), static_cast(0)); + + armnn::ConstTensor biases(biasTensorInfo, biasData); + + // Create convolution layer with biases + convLayer = net->AddConvolution2dLayer(convDesc2d, + weights, + Optional(biases), + convLayerName.c_str()); + } + else + { + // Create convolution layer without biases + convLayer = net->AddConvolution2dLayer(convDesc2d, + weights, + EmptyOptional(), + convLayerName.c_str()); + } + + CHECK(convLayer); + + // Add an output layer + armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer"); + CHECK(outputLayer); + + // set the tensors in the network (NHWC format) + TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType); + if (dataType == armnn::DataType::QAsymmU8) + { + inputTensorInfo.SetQuantizationOffset(0); + inputTensorInfo.SetQuantizationScale(0.9f); + } + + TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType); + if (dataType == armnn::DataType::QAsymmU8) + { + outputTensorInfo.SetQuantizationOffset(0); + outputTensorInfo.SetQuantizationScale(0.9f); + } + + // Connect the layers + inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); + inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + + convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // Optimize the network for the backend supported by the factory + std::vector backends = {factory.GetBackendId()}; + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + armnn::OptimizerOptions optimizerOptions; + armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(), + optimizerOptions); + CHECK(optimizedNet != nullptr); + + // Find the PreCompiled layer in the optimised graph + armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get()); + Layer* preCompiledLayer = nullptr; + for (auto& layer : optimisedGraph) + { + if (layer->GetType() == LayerType::PreCompiled) + { + preCompiledLayer = layer; + } + } + CHECK(preCompiledLayer != nullptr); + + // Create the TensorHandles. + CreateTensorHandles(optimisedGraph, factory); + + // Make the workload and check it. + auto workload = MakeAndCheckWorkload(*preCompiledLayer, factory); + + PreCompiledQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns the workload so we can do extra, backend-specific tests. + // NOTE: We need to return the optimised network as well, otherwise it gets + // out of scope and the tensor handles get destructed + return std::make_pair(std::move(optimizedNet), std::move(workload)); +} + +template +std::unique_ptr CreateConstantWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + const armnn::TensorShape& outputShape) +{ + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + + // create constant layer + auto constant = graph.AddLayer("constant"); + CHECK(constant); + constant->m_LayerOutput = std::make_unique(outputTensorInfo); + + Layer* const output = graph.AddLayer(0, "output"); + + // Adds connections. 
+ // connect constant to output + Connect(constant, output, outputTensorInfo, 0, 0); + + // create tensor handles + CreateTensorHandles(graph, factory); + + // create Constant workload" + auto workloadConstant = MakeAndCheckWorkload(*constant, factory); + CHECK(workloadConstant); + + return workloadConstant; +} + +template +std::unique_ptr CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& alphaShape, + const armnn::TensorShape& outputShape, + armnn::DataType dataType) +{ + // Creates the PReLU layer + Layer* const layer = graph.AddLayer("prelu"); + CHECK(layer != nullptr); + + // Creates extra layers + Layer* const input = graph.AddLayer (0, "input"); + Layer* const alpha = graph.AddLayer (1, "alpha"); + Layer* const output = graph.AddLayer(0, "output"); + CHECK(input != nullptr); + CHECK(alpha != nullptr); + CHECK(output != nullptr); + + // Connects up + armnn::TensorInfo inputTensorInfo (inputShape, dataType); + armnn::TensorInfo alphaTensorInfo (alphaShape, dataType); + armnn::TensorInfo outputTensorInfo(outputShape, dataType); + Connect(input, layer, inputTensorInfo, 0, 0); + Connect(alpha, layer, alphaTensorInfo, 0, 1); + Connect(layer, output, outputTensorInfo, 0, 0); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it + auto workload = MakeAndCheckWorkload(*layer, factory); + + PreluQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 2); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. + return workload; +} + +template +std::unique_ptr CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + SpaceToDepthDescriptor desc; + desc.m_BlockSize = 2; + Layer* const layer = graph.AddLayer(desc, "spaceToDepth"); + + // Creates extra layers. + Layer* const input = graph.AddLayer(0, "input"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up. + armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType); + + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it. + auto workload = MakeAndCheckWorkload(*layer, factory); + + SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == 1); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + return workload; +} + +template +std::unique_ptr CreateStackWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputShape, + unsigned int axis, + unsigned int numInputs) +{ + armnn::TensorInfo inputTensorInfo(inputShape, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + + // Constructs the Stack layer. + armnn::StackDescriptor descriptor(axis, numInputs, inputShape); + Layer* const stackLayer = graph.AddLayer(descriptor, "stack"); + CHECK(stackLayer != nullptr); + + // Constructs layer inputs and output. + std::vector inputs; + for (unsigned int i=0; i( + static_cast(i), + ("input" + std::to_string(i)).c_str() + )); + CHECK(inputs[i] != nullptr); + } + Layer* const output = graph.AddLayer(0, "output"); + CHECK(output != nullptr); + + // Adds connections. 
+ for (unsigned int i=0; i(*stackLayer, factory); + StackQueueDescriptor queueDescriptor = stackWorkload->GetData(); + CHECK(queueDescriptor.m_Inputs.size() == numInputs); + CHECK(queueDescriptor.m_Outputs.size() == 1); + + return stackWorkload; +} + +} // Anonymous namespace diff --git a/src/armnnTestUtils/DataTypeUtils.hpp b/src/armnnTestUtils/DataTypeUtils.hpp new file mode 100644 index 0000000000..528a573b99 --- /dev/null +++ b/src/armnnTestUtils/DataTypeUtils.hpp @@ -0,0 +1,45 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + + +#include + +#include + +// Utility tenmplate to convert a collection of values to the correct type +template > +std::vector ConvertToDataType(const std::vector& input, + const armnn::TensorInfo& inputTensorInfo) +{ + std::vector output(input.size()); + auto outputTensorInfo = inputTensorInfo; + outputTensorInfo.SetDataType(ArmnnType); + + std::unique_ptr> pOutputEncoder = armnn::MakeEncoder(outputTensorInfo, output.data()); + armnn::Encoder& rOutputEncoder = *pOutputEncoder; + + for (auto it = input.begin(); it != input.end(); ++it) + { + rOutputEncoder.Set(*it); + ++rOutputEncoder; + } + return output; +} + +// Utility tenmplate to convert a single value to the correct type +template +T ConvertToDataType(const float& value, + const armnn::TensorInfo& tensorInfo) +{ + std::vector output(1); + std::unique_ptr> pEncoder = armnn::MakeEncoder(tensorInfo, output.data()); + armnn::Encoder& rEncoder = *pEncoder; + rEncoder.Set(value); + return output[0]; +} diff --git a/src/armnnTestUtils/GraphUtils.cpp b/src/armnnTestUtils/GraphUtils.cpp new file mode 100644 index 0000000000..15dc888e21 --- /dev/null +++ b/src/armnnTestUtils/GraphUtils.cpp @@ -0,0 +1,78 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "GraphUtils.hpp" + +#include + +bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name) +{ + for (auto&& layer : graph) + { + if (layer->GetName() == name) + { + return true; + } + } + return false; +} + +armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name) +{ + for (auto&& layer : graph) + { + if (layer->GetNameStr() == name) + { + return layer; + } + } + return nullptr; +} + +bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num) +{ + return layer->GetNumInputSlots() == num; +} + +bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num) +{ + return layer->GetNumOutputSlots() == num; +} + +bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, + unsigned int srcSlot, unsigned int destSlot, + const armnn::TensorInfo& expectedTensorInfo) +{ + const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(srcSlot); + const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo(); + if (expectedTensorInfo != tensorInfo) + { + return false; + } + const unsigned int numConnections = outputSlot.GetNumConnections(); + for (unsigned int c = 0; c < numConnections; ++c) + { + auto inputSlot = armnn::PolymorphicDowncast(outputSlot.GetConnection(c)); + if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() && + inputSlot->GetSlotIndex() == destSlot) + { + return true; + } + } + return false; +} + +/// Checks that first comes before second in the order. 
+bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second) +{ + graph.Print(); + + const auto& order = graph.TopologicalSort(); + + auto firstPos = std::find(order.begin(), order.end(), first); + auto secondPos = std::find(firstPos, order.end(), second); + + return (secondPos != order.end()); +} diff --git a/src/armnnTestUtils/GraphUtils.hpp b/src/armnnTestUtils/GraphUtils.hpp new file mode 100644 index 0000000000..95f07040f2 --- /dev/null +++ b/src/armnnTestUtils/GraphUtils.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include + + +bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name); + +armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name); + +bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num); + +bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num); + +bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, + unsigned int srcSlot, unsigned int destSlot, + const armnn::TensorInfo& expectedTensorInfo); + +bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second); + diff --git a/src/armnnTestUtils/TensorCopyUtils.cpp b/src/armnnTestUtils/TensorCopyUtils.cpp new file mode 100644 index 0000000000..14c6d5cc61 --- /dev/null +++ b/src/armnnTestUtils/TensorCopyUtils.cpp @@ -0,0 +1,23 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include +#include + +void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory) +{ + tensorHandle->CopyInFrom(memory); +} + +void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle) +{ + tensorHandle->CopyOutTo(memory); +} + +void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory) +{ + tensorHandle->Allocate(); + CopyDataToITensorHandle(tensorHandle, memory); +} diff --git a/src/armnnTestUtils/TensorHelpers.hpp b/src/armnnTestUtils/TensorHelpers.hpp new file mode 100644 index 0000000000..d51e4b1bce --- /dev/null +++ b/src/armnnTestUtils/TensorHelpers.hpp @@ -0,0 +1,235 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include + +constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f; + +template +struct SelectiveComparer +{ + static bool Compare(T a, T b) + { + return (std::max(a, b) - std::min(a, b)) <= 1; + } + +}; + +template +struct SelectiveComparer +{ + static bool Compare(T a, T b) + { + // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead. + if (a == 0.0f || b == 0.0f) + { + return std::abs(a - b) <= g_FloatCloseToZeroTolerance; + } + + if (std::isinf(a) && a == b) + { + return true; + } + + if (std::isnan(a) && std::isnan(b)) + { + return true; + } + + // For unquantized floats we use a tolerance of 1%. 
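    // As an illustrative aside (not part of the original patch), the scheme above implies, for example:
    //   SelectiveCompare<float>(100.0f, 100.5f)  -> true   (within the 1% relative tolerance)
    //   SelectiveCompare<float>(100.0f, 105.0f)  -> false  (outside the 1% relative tolerance)
    //   SelectiveCompare<float>(0.0f, 5.0e-7f)   -> true   (near zero the absolute 1.0e-6 tolerance applies)
    //   SelectiveCompare<uint8_t>(200, 201)      -> true   (quantized types tolerate a difference of 1)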
+ return armnnUtils::within_percentage_tolerance(a, b); + } +}; + +template +bool SelectiveCompare(T a, T b) +{ + return SelectiveComparer()>::Compare(a, b); +}; + +template +bool SelectiveCompareBoolean(T a, T b) +{ + return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0))); +}; + +template +armnn::PredicateResult CompareTensors(const std::vector& actualData, + const std::vector& expectedData, + const armnn::TensorShape& actualShape, + const armnn::TensorShape& expectedShape, + bool compareBoolean = false, + bool isDynamic = false) +{ + if (actualData.size() != expectedData.size()) + { + armnn::PredicateResult res(false); + res.Message() << "Different data size [" + << actualData.size() + << "!=" + << expectedData.size() + << "]"; + return res; + } + + if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions()) + { + armnn::PredicateResult res(false); + res.Message() << "Different number of dimensions [" + << actualShape.GetNumDimensions() + << "!=" + << expectedShape.GetNumDimensions() + << "]"; + return res; + } + + if (actualShape.GetNumElements() != expectedShape.GetNumElements()) + { + armnn::PredicateResult res(false); + res.Message() << "Different number of elements [" + << actualShape.GetNumElements() + << "!=" + << expectedShape.GetNumElements() + << "]"; + return res; + } + + unsigned int numberOfDimensions = actualShape.GetNumDimensions(); + + if (!isDynamic) + { + // Checks they are same shape. + for (unsigned int i = 0; i < numberOfDimensions; ++i) + { + if (actualShape[i] != expectedShape[i]) + { + armnn::PredicateResult res(false); + res.Message() << "Different shapes [" + << actualShape[i] + << "!=" + << expectedShape[i] + << "]"; + return res; + } + } + } + + // Fun iteration over n dimensions. + std::vector indices; + for (unsigned int i = 0; i < numberOfDimensions; i++) + { + indices.emplace_back(0); + } + + std::stringstream errorString; + int numFailedElements = 0; + constexpr int maxReportedDifferences = 3; + unsigned int index = 0; + + // Compare data element by element. + while (true) + { + bool comparison; + // As true for uint8_t is non-zero (1-255) we must have a dedicated compare for Booleans. + if(compareBoolean) + { + comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]); + } + else + { + comparison = SelectiveCompare(actualData[index], expectedData[index]); + } + + if (!comparison) + { + ++numFailedElements; + + if (numFailedElements <= maxReportedDifferences) + { + if (numFailedElements >= 2) + { + errorString << ", "; + } + errorString << "["; + for (unsigned int i = 0; i < numberOfDimensions; ++i) + { + errorString << indices[i]; + if (i != numberOfDimensions - 1) + { + errorString << ","; + } + } + errorString << "]"; + + errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")"; + } + } + + ++indices[numberOfDimensions - 1]; + for (unsigned int i=numberOfDimensions-1; i>0; i--) + { + if (indices[i] == actualShape[i]) + { + indices[i] = 0; + ++indices[i - 1]; + } + } + if (indices[0] == actualShape[0]) + { + break; + } + + index++; + } + + armnn::PredicateResult comparisonResult(true); + if (numFailedElements > 0) + { + comparisonResult.SetResult(false); + comparisonResult.Message() << numFailedElements << " different values at: "; + if (numFailedElements > maxReportedDifferences) + { + errorString << ", ... 
(and " << (numFailedElements - maxReportedDifferences) << " other differences)"; + } + comparisonResult.Message() << errorString.str(); + } + + return comparisonResult; +} + +template +std::vector MakeRandomTensor(const armnn::TensorInfo& tensorInfo, + unsigned int seed, + float min = -10.0f, + float max = 10.0f) +{ + std::mt19937 gen(seed); + std::uniform_real_distribution dist(min, max); + + std::vector init(tensorInfo.GetNumElements()); + for (unsigned int i = 0; i < init.size(); i++) + { + init[i] = dist(gen); + } + + const float qScale = tensorInfo.GetQuantizationScale(); + const int32_t qOffset = tensorInfo.GetQuantizationOffset(); + + return armnnUtils::QuantizedVector(init, qScale, qOffset); +} diff --git a/src/armnnTestUtils/TestUtils.cpp b/src/armnnTestUtils/TestUtils.cpp new file mode 100644 index 0000000000..9ac0b3986e --- /dev/null +++ b/src/armnnTestUtils/TestUtils.cpp @@ -0,0 +1,62 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "TestUtils.hpp" + +#include + +using namespace armnn; + +void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, + unsigned int fromIndex, unsigned int toIndex) +{ + ARMNN_ASSERT(from); + ARMNN_ASSERT(to); + + try + { + from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); + } + catch (const std::out_of_range& exc) + { + std::ostringstream message; + + if (to->GetType() == armnn::LayerType::FullyConnected && toIndex == 2) + { + message << "Tried to connect bias to FullyConnected layer when bias is not enabled: "; + } + + message << "Failed to connect to input slot " + << toIndex + << " on " + << GetLayerTypeAsCString(to->GetType()) + << " layer " + << std::quoted(to->GetName()) + << " as the slot does not exist or is unavailable"; + throw LayerValidationException(message.str()); + } + + from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo); +} + +namespace armnn +{ + +Graph& GetGraphForTesting(IOptimizedNetwork* optNet) +{ + return optNet->pOptimizedNetworkImpl->GetGraph(); +} + +ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet) +{ + return optNet->pOptimizedNetworkImpl->GetModelOptions(); +} + +profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime) +{ + return runtime->m_ProfilingService; +} + +} \ No newline at end of file diff --git a/src/armnnTestUtils/TestUtils.hpp b/src/armnnTestUtils/TestUtils.hpp new file mode 100644 index 0000000000..d5b6d1b805 --- /dev/null +++ b/src/armnnTestUtils/TestUtils.hpp @@ -0,0 +1,58 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include +#include + +void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, + unsigned int fromIndex = 0, unsigned int toIndex = 0); + +template +bool IsLayerOfType(const armnn::Layer* const layer) +{ + return (layer->GetType() == armnn::LayerEnumOf()); +} + +inline bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last) +{ + return (first == last); +} + +/// Checks each unary function in Us evaluates true for each correspondent layer in the sequence [first, last). +template +bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last, U&& u, Us&&... 
us) +{ + return u(*first) && CheckSequence(std::next(first), last, us...); +} + +template +bool CheckRelatedLayers(armnn::Graph& graph, const std::list& testRelatedLayers) +{ + for (auto& layer : graph) + { + if (layer->GetType() == armnn::LayerEnumOf()) + { + auto& relatedLayers = layer->GetRelatedLayerNames(); + if (!std::equal(relatedLayers.begin(), relatedLayers.end(), testRelatedLayers.begin(), + testRelatedLayers.end())) + { + return false; + } + } + } + + return true; +} + +namespace armnn +{ +Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr); +ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr); +profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime); + +} // namespace armnn \ No newline at end of file diff --git a/src/armnnTestUtils/UnitTests.cpp b/src/armnnTestUtils/UnitTests.cpp new file mode 100644 index 0000000000..cf532a76fd --- /dev/null +++ b/src/armnnTestUtils/UnitTests.cpp @@ -0,0 +1,67 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#endif +#include + +#include "UnitTests.hpp" + +struct ConfigureLoggingFixture +{ + ConfigureLoggingFixture() + { + ConfigureLoggingTest(); + } +}; + + + +TEST_SUITE("LoggerSuite") +{ +TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest") +{ + std::stringstream ss; + { + struct StreamRedirector + { + public: + StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer) + : m_Stream(stream) + , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) + {} + ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); } + + private: + std::ostream& m_Stream; + std::streambuf* m_BackupBuffer; + }; + + StreamRedirector redirect(std::cout, ss.rdbuf()); + + using namespace armnn; + SetLogFilter(LogSeverity::Trace); + SetAllLoggingSinks(true, false, false); + + ARMNN_LOG(trace) << "My trace message; " << -2; + ARMNN_LOG(debug) << "My debug message; " << -1; + ARMNN_LOG(info) << "My info message; " << 0; + ARMNN_LOG(warning) << "My warning message; " << 1; + ARMNN_LOG(error) << "My error message; " << 2; + ARMNN_LOG(fatal) << "My fatal message; " << 3; + + SetLogFilter(LogSeverity::Fatal); + } + + CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos); + CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos); + CHECK(ss.str().find("Info: My info message; 0") != std::string::npos); + CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos); + CHECK(ss.str().find("Error: My error message; 2") != std::string::npos); + CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos); +} + +} \ No newline at end of file diff --git a/src/armnnTestUtils/UnitTests.hpp b/src/armnnTestUtils/UnitTests.hpp new file mode 100644 index 0000000000..788ad87718 --- /dev/null +++ b/src/armnnTestUtils/UnitTests.hpp @@ -0,0 +1,191 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "TensorHelpers.hpp" +#include "WorkloadTestUtils.hpp" + +#include +#include +#include +#include + +#include + +#include +#include + +#include + +inline void ConfigureLoggingTest() +{ + // Configures logging for both the ARMNN library and this test program. 
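Illustrative usage, a minimal sketch and not part of the original patch: CheckSequence and IsLayerOfType are what the optimisation tests use to assert on the shape of an optimised graph. The sketch assumes optNet is an IOptimizedNetworkPtr produced by armnn::Optimize() elsewhere in the test.

    // Sketch only: optNet is an assumption; the expected layer sequence is just an example.
    armnn::Graph& graph = GetGraphForTesting(optNet.get());

    // Expect exactly Input -> Floor -> Output after optimisation.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<armnn::InputLayer>,
                        &IsLayerOfType<armnn::FloorLayer>,
                        &IsLayerOfType<armnn::OutputLayer>));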
+ armnn::ConfigureLogging(true, true, armnn::LogSeverity::Fatal); +} + +// The following macros require the caller to have defined FactoryType, with one of the following using statements: +// +// using FactoryType = armnn::RefWorkloadFactory; +// using FactoryType = armnn::ClWorkloadFactory; +// using FactoryType = armnn::NeonWorkloadFactory; + +/// Executes CHECK_MESSAGE on CompareTensors() return value so that the predicate_result message is reported. +/// If the test reports itself as not supported then the tensors are not compared. +/// Additionally this checks that the supportedness reported by the test matches the name of the test. +/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name. +/// This is useful because it clarifies that the feature being tested is not actually supported +/// (a passed test with the name of a feature would imply that feature was supported). +/// If support is added for a feature, the test case will fail because the name incorrectly contains UNSUPPORTED. +/// If support is removed for a feature, the test case will fail because the name doesn't contain UNSUPPORTED. +template +void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult& testResult) +{ + bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos; + CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported, + "The test name does not match the supportedness it is reporting"); + if (testResult.m_Supported) + { + auto result = CompareTensors(testResult.m_ActualData, + testResult.m_ExpectedData, + testResult.m_ActualShape, + testResult.m_ExpectedShape, + testResult.m_CompareBoolean); + CHECK_MESSAGE(result.m_Result, result.m_Message.str()); + } +} + +template +void CompareTestResultIfSupported(const std::string& testName, const std::vector>& testResult) +{ + bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos; + for (unsigned int i = 0; i < testResult.size(); ++i) + { + CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported, + "The test name does not match the supportedness it is reporting"); + if (testResult[i].m_Supported) + { + auto result = CompareTensors(testResult[i].m_ActualData, + testResult[i].m_ExpectedData, + testResult[i].m_ActualShape, + testResult[i].m_ExpectedShape); + CHECK_MESSAGE(result.m_Result, result.m_Message.str()); + } + } +} + +template +void RunTestFunction(const char* testName, TFuncPtr testFunction, Args... args) +{ + std::unique_ptr profiler = std::make_unique(); + armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get()); + + auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); + FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); + + auto testResult = (*testFunction)(workloadFactory, memoryManager, args...); + CompareTestResultIfSupported(testName, testResult); + + armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr); +} + + +template +void RunTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... 
args) +{ + std::unique_ptr profiler = std::make_unique(); + armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get()); + + auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); + FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); + + auto tensorHandleFactory = WorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); + + auto testResult = (*testFunction)(workloadFactory, memoryManager, tensorHandleFactory, args...); + CompareTestResultIfSupported(testName, testResult); + + armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr); +} + +#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \ + TEST_CASE(#TestName) \ + { \ + TestFunction(); \ + } + +#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \ + TEST_CASE(#TestName) \ + { \ + RunTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \ + TEST_CASE_FIXTURE(Fixture, #TestName) \ + { \ + RunTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \ + TEST_CASE(#TestName) \ + { \ + RunTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \ + TEST_CASE_FIXTURE(Fixture, #TestName) \ + { \ + RunTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +template +void CompareRefTestFunction(const char* testName, TFuncPtr testFunction, Args... args) +{ + auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); + FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); + + armnn::RefWorkloadFactory refWorkloadFactory; + + auto testResult = (*testFunction)(workloadFactory, memoryManager, refWorkloadFactory, args...); + CompareTestResultIfSupported(testName, testResult); +} + +template +void CompareRefTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args) +{ + auto memoryManager = WorkloadFactoryHelper::GetMemoryManager(); + FactoryType workloadFactory = WorkloadFactoryHelper::GetFactory(memoryManager); + + armnn::RefWorkloadFactory refWorkloadFactory; + auto tensorHandleFactory = WorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); + auto refTensorHandleFactory = + RefWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); + + auto testResult = (*testFunction)( + workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, args...); + CompareTestResultIfSupported(testName, testResult); +} + +#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \ + TEST_CASE(#TestName) \ + { \ + CompareRefTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \ + TEST_CASE(#TestName) \ + { \ + CompareRefTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \ + TEST_CASE_FIXTURE(Fixture, #TestName) \ + { \ + CompareRefTestFunction(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) 
\ + TEST_CASE_FIXTURE(Fixture, #TestName) \ + { \ + CompareRefTestFunctionUsingTensorHandleFactory(#TestName, &TestFunction, ##__VA_ARGS__); \ + } diff --git a/src/armnnTestUtils/WorkloadTestUtils.hpp b/src/armnnTestUtils/WorkloadTestUtils.hpp new file mode 100644 index 0000000000..856e54a72a --- /dev/null +++ b/src/armnnTestUtils/WorkloadTestUtils.hpp @@ -0,0 +1,113 @@ +// +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include +#include +#include + +namespace armnn +{ +class ITensorHandle; +} // namespace armnn + +namespace +{ + +template +void AddInputToWorkload(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Inputs.push_back(tensorHandle); + info.m_InputTensorInfos.push_back(tensorInfo); +} + +template +void AddOutputToWorkload(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Outputs.push_back(tensorHandle); + info.m_OutputTensorInfos.push_back(tensorInfo); +} + +template +void SetWorkloadInput(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + unsigned int index, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Inputs[index] = tensorHandle; + info.m_InputTensorInfos[index] = tensorInfo; +} + +template +void SetWorkloadOutput(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + unsigned int index, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Outputs[index] = tensorHandle; + info.m_OutputTensorInfos[index] = tensorInfo; +} + +inline void ExecuteWorkload(armnn::IWorkload& workload, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool memoryManagementRequested = true) +{ + const bool manageMemory = memoryManager && memoryManagementRequested; + + // Acquire working memory (if needed) + if (manageMemory) + { + memoryManager->Acquire(); + } + + // Perform PostAllocationConfiguration + workload.PostAllocationConfigure(); + + // Execute the workload + workload.Execute(); + + // Release working memory (if needed) + if (manageMemory) + { + memoryManager->Release(); + } +} + +inline armnn::Optional GetBiasTypeFromWeightsType(armnn::Optional weightsType) +{ + if (!weightsType) + { + return weightsType; + } + + switch(weightsType.value()) + { + case armnn::DataType::BFloat16: + case armnn::DataType::Float16: + case armnn::DataType::Float32: + return weightsType; + case armnn::DataType::QAsymmS8: + case armnn::DataType::QAsymmU8: + case armnn::DataType::QSymmS8: + case armnn::DataType::QSymmS16: + return armnn::DataType::Signed32; + default: + ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type."); + } + return armnn::EmptyOptional(); +} + +} // anonymous namespace diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp index 4000d49132..5dc78c697a 100644 --- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp +++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp @@ -6,7 +6,7 @@ #include "ParserFlatbuffersFixture.hpp" #include "ParserPrototxtFixture.hpp" #include "ParserHelper.hpp" -#include "test/GraphUtils.hpp" +#include #include #include diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp index 
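Illustrative usage, a minimal sketch and not part of the original patch: a backend's layer-test translation unit defines FactoryType once and then registers tests through these macros. The sketch assumes the reference backend and an existing SimpleFloorTest layer test; any test returning a LayerTestResult is registered the same way.

    // Sketch only: assumes the backend's WorkloadFactoryHelper, the layer test implementation
    // and this UnitTests.hpp header have already been included.
    using FactoryType = armnn::RefWorkloadFactory;

    TEST_SUITE("RefLayerTestsExample")
    {
    // Expands to a doctest TEST_CASE that builds the factory plus tensor handle factory, runs the
    // layer test and passes the result through CompareTestResultIfSupported().
    ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFloorExample, SimpleFloorTest<armnn::DataType::Float32>)
    }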
871f647bb2..6b35558a16 100644 --- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp +++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp @@ -17,7 +17,7 @@ #include -#include +#include #include #include diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp index 76e65dfd8c..31ee8293a2 100644 --- a/src/armnnUtils/ParserPrototxtFixture.hpp +++ b/src/armnnUtils/ParserPrototxtFixture.hpp @@ -6,7 +6,7 @@ #pragma once #include -#include +#include #include #include diff --git a/src/backends/aclCommon/test/CMakeLists.txt b/src/backends/aclCommon/test/CMakeLists.txt index 756ef4aa28..7eb232a643 100644 --- a/src/backends/aclCommon/test/CMakeLists.txt +++ b/src/backends/aclCommon/test/CMakeLists.txt @@ -13,6 +13,7 @@ list(APPEND armnnAclCommonUnitTests_sources add_library(armnnAclCommonUnitTests OBJECT ${armnnAclCommonUnitTests_sources}) target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) +target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils) target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends) target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling) target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include) diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp index bdae9988ed..6a0d5cf766 100644 --- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp +++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp @@ -4,8 +4,8 @@ // #pragma once -#include -#include +#include +#include #include #include #include diff --git a/src/backends/aclCommon/test/MemCopyTestImpl.hpp b/src/backends/aclCommon/test/MemCopyTestImpl.hpp index 91ba4eae17..d943cfd8c0 100644 --- a/src/backends/aclCommon/test/MemCopyTestImpl.hpp +++ b/src/backends/aclCommon/test/MemCopyTestImpl.hpp @@ -5,15 +5,14 @@ #pragma once #include - #include -#include -#include -#include +#include #include -#include +#include +#include +#include namespace { diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index ef2a34889e..93932a83a1 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -17,7 +18,7 @@ #include #include -#include +//#include #include @@ -45,6 +46,31 @@ const TensorInfo OverrideDataType(const TensorInfo& info, Optional typ } // anonymous namespace +inline armnn::Optional GetBiasTypeFromWeightsType(armnn::Optional weightsType) +{ + if (!weightsType) + { + return weightsType; + } + + switch(weightsType.value()) + { + case armnn::DataType::BFloat16: + case armnn::DataType::Float16: + case armnn::DataType::Float32: + return weightsType; + case armnn::DataType::QAsymmS8: + case armnn::DataType::QAsymmU8: + case armnn::DataType::QSymmS8: + case armnn::DataType::QSymmS16: + return armnn::DataType::Signed32; + default: + ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type."); + } + return armnn::EmptyOptional(); +} + + bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const IConnectableLayer& connectableLayer, Optional dataType, diff --git 
a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 206faf5020..8f97669d0a 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -35,7 +35,6 @@ COMMON_SOURCES := \ # up by the Android.mk file in the root of ArmNN COMMON_TEST_SOURCES := \ - test/CommonTestUtils.cpp \ test/CustomMemoryOptimizerStrategyTests.cpp \ test/InstanceNormalizationEndToEndTestImpl.cpp \ test/JsonPrinterTestImpl.cpp \ @@ -43,7 +42,6 @@ COMMON_TEST_SOURCES := \ test/QLstmEndToEndTestImpl.cpp \ test/QuantizedLstmEndToEndTestImpl.cpp \ test/SpaceToDepthEndToEndTestImpl.cpp \ - test/TensorCopyUtils.cpp \ test/layerTests/AbsTestImpl.cpp \ test/layerTests/ActivationTestImpl.cpp \ test/layerTests/AdditionTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp index f7d4596450..10e8363c7f 100644 --- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp @@ -8,7 +8,9 @@ #include #include -#include + +#include + #include namespace diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp index c61f3f097e..caa67aca37 100644 --- a/src/backends/backendsCommon/test/ActivationFixture.hpp +++ b/src/backends/backendsCommon/test/ActivationFixture.hpp @@ -4,12 +4,12 @@ // #pragma once -#include "TensorCopyUtils.hpp" -#include "WorkloadTestUtils.hpp" +#include +#include #include -#include +#include struct ActivationFixture { diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp index 041f9f8f17..1b653858f8 100644 --- a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp index 62c06fe6d3..b40964c89a 100644 --- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp +++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp @@ -14,7 +14,7 @@ #include "ProfilingUtils.hpp" #include "RequestCounterDirectoryCommandHandler.hpp" -#include +#include #include #include diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp index 859694ceb2..87fccd8ca8 100644 --- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp @@ -8,7 +8,7 @@ #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 958f4841fb..bb85f7e22e 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -10,12 +10,15 @@ list(APPEND armnnBackendsCommonUnitTests_sources BackendIdTests.cpp BackendProfilingTests.cpp BackendRegistryTests.cpp + CommonTestUtils.hpp ChannelShuffleEndToEndTestImpl.hpp ComparisonEndToEndTestImpl.hpp CompatibilityTests.cpp ConcatEndToEndTestImpl.hpp Convolution3dEndToEndTestImpl.hpp CustomMemoryOptimizerStrategyTests.cpp + DataLayoutUtils.hpp + DataTypeUtils.hpp DefaultAsyncExecuteTest.cpp 
DepthToSpaceEndToEndTestImpl.hpp DequantizeEndToEndTestImpl.hpp @@ -54,7 +57,9 @@ list(APPEND armnnBackendsCommonUnitTests_sources SpaceToDepthEndToEndTestImpl.hpp SplitterEndToEndTestImpl.hpp StridedSliceAsyncEndToEndTest.hpp + TensorCopyUtils.hpp WorkloadFactoryHelper.hpp + WorkloadTestUtils.hpp layerTests/AbsTestImpl.cpp layerTests/AbsTestImpl.hpp layerTests/ActivationTestImpl.cpp @@ -116,6 +121,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/InstanceNormalizationTestImpl.hpp layerTests/L2NormalizationTestImpl.cpp layerTests/L2NormalizationTestImpl.hpp + layerTests/LayerTestResult.hpp layerTests/LogTestImpl.cpp layerTests/LogTestImpl.hpp layerTests/LogicalTestImpl.cpp @@ -200,6 +206,7 @@ endif() add_library(armnnBackendsCommonUnitTests OBJECT ${armnnBackendsCommonUnitTests_sources}) target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn) target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils) +target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils) target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends) target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling) target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include) diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp index 7d46be7bcb..27907f1df3 100644 --- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/CommonTestUtils.cpp b/src/backends/backendsCommon/test/CommonTestUtils.cpp deleted file mode 100644 index 287c71ebc7..0000000000 --- a/src/backends/backendsCommon/test/CommonTestUtils.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "CommonTestUtils.hpp" - -#include - -using namespace armnn; - -SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers) -{ - SubgraphView::InputSlots result; - for (auto&& layer : layers) - { - for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it) - { - result.push_back(&(*it)); - } - } - return result; -} - -SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers) -{ - SubgraphView::OutputSlots result; - for (auto && layer : layers) - { - for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it) - { - result.push_back(&(*it)); - } - } - return result; -} - -SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::InputSlots&& inputs, - SubgraphView::OutputSlots&& outputs, - SubgraphView::Layers&& layers) -{ - return std::make_unique(std::move(inputs), std::move(outputs), std::move(layers)); -} - -armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId) -{ - auto& backendRegistry = BackendRegistryInstance(); - auto backendFactory = backendRegistry.GetFactory(backendId); - auto backendObjPtr = backendFactory(); - - return backendObjPtr; -} - -armnn::TensorShape MakeTensorShape(unsigned int batches, - unsigned int channels, - unsigned int height, - unsigned int width, - armnn::DataLayout layout) -{ - using namespace armnn; - switch (layout) - { - case DataLayout::NCHW: - return TensorShape{ batches, channels, height, width }; - case DataLayout::NHWC: - return TensorShape{ batches, height, width, channels }; - default: - throw InvalidArgumentException(std::string("Unsupported data layout: ") + GetDataLayoutName(layout)); - } -} diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp index 07523d73c4..72e3860ecb 100644 --- a/src/backends/backendsCommon/test/CommonTestUtils.hpp +++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp @@ -1,119 +1,12 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. 
+#include "../../../armnnTestUtils/CommonTestUtils.hpp" -#include -#include -#include -#include - -#include - -#include -#include - -#include - -#include -#include -#include - -// Checks that two collections have the exact same contents (in any order) -// The given collections do not have to contain duplicates -// Cannot use std::sort here because std lists have their own std::list::sort method -template -bool AreEqual(const CollectionType& lhs, const CollectionType& rhs) -{ - if (lhs.size() != rhs.size()) - { - return false; - } - - auto lhs_it = std::find_if(lhs.begin(), lhs.end(), [&rhs](auto& item) - { - return std::find(rhs.begin(), rhs.end(), item) == rhs.end(); - }); - - return lhs_it == lhs.end(); -} - -// Checks that the given collection contains the specified item -template -bool Contains(const CollectionType& collection, const typename CollectionType::value_type& item) -{ - return std::find(collection.begin(), collection.end(), item) != collection.end(); -} - -// Checks that the given map contains the specified key -template -bool Contains(const MapType& map, const typename MapType::key_type& key) -{ - return map.find(key) != map.end(); -} - -// Utility template for comparing tensor elements -template> -inline bool Compare(T a, T b, float tolerance = 0.000001f) -{ - if (ArmnnType == armnn::DataType::Boolean) - { - // NOTE: Boolean is represented as uint8_t (with zero equals - // false and everything else equals true), therefore values - // need to be casted to bool before comparing them - return static_cast(a) == static_cast(b); - } - - // NOTE: All other types can be cast to float and compared with - // a certain level of tolerance - return std::fabs(static_cast(a) - static_cast(b)) <= tolerance; -} - -template -void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo) -{ - layer->m_Weight = std::make_unique(weightInfo); - layer->m_Bias = std::make_unique(biasInfo); - - layer->m_Weight->Allocate(); - layer->m_Bias->Allocate(); -} - -armnn::SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers); - -armnn::SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers); - -armnn::SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(armnn::SubgraphView::InputSlots&& inputs, - armnn::SubgraphView::OutputSlots&& outputs, - armnn::SubgraphView::Layers&& layers); - -armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId); - -armnn::TensorShape MakeTensorShape(unsigned int batches, - unsigned int channels, - unsigned int height, - unsigned int width, - armnn::DataLayout layout); - -template -static std::vector GenerateRandomData(size_t size) -{ - constexpr bool isIntegerType = std::is_integral::value; - using Distribution = - typename std::conditional, - std::uniform_real_distribution>::type; - - static constexpr DataType lowerLimit = std::numeric_limits::min(); - static constexpr DataType upperLimit = std::numeric_limits::max(); - - static Distribution distribution(lowerLimit, upperLimit); - static std::default_random_engine generator; - - std::vector randomData(size); - generate(randomData.begin(), randomData.end(), []() { return distribution(generator); }); - - return randomData; -} +#pragma message("backendsCommon/test/CommonTestUtils.hpp has been deprecated, it is due for removal in 22.08 release." 
\ + " Please use from armnnTestUtils library, /src/armnnTestUtils/CommonTestUtils.hpp) \ No newline at end of file diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp index e274163c6f..4bdf3f8bee 100644 --- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "CommonTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp index 62f0e4cd36..c8d20dace0 100644 --- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "CommonTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp index b1f685b4cd..fab5670a4f 100644 --- a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp @@ -9,8 +9,8 @@ #include -#include -#include +#include +#include #include #include diff --git a/src/backends/backendsCommon/test/DataLayoutUtils.hpp b/src/backends/backendsCommon/test/DataLayoutUtils.hpp index 89b3900979..e920c543f0 100644 --- a/src/backends/backendsCommon/test/DataLayoutUtils.hpp +++ b/src/backends/backendsCommon/test/DataLayoutUtils.hpp @@ -1,60 +1,9 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once +#include -#include -#include - -#include - -template -void PermuteTensorNchwToNhwc(armnn::TensorInfo& tensorInfo, std::vector& tensorData) -{ - const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 }; - - tensorInfo = armnnUtils::Permuted(tensorInfo, nchwToNhwc); - - std::vector tmp(tensorData.size()); - armnnUtils::Permute(tensorInfo.GetShape(), nchwToNhwc, tensorData.data(), tmp.data(), sizeof(T)); - tensorData = tmp; -} - -template -void PermuteTensorNhwcToNchw(armnn::TensorInfo& tensorInfo, std::vector& tensorData) -{ - const armnn::PermutationVector nhwcToNchw = { 0, 2, 3, 1 }; - - tensorInfo = armnnUtils::Permuted(tensorInfo, nhwcToNchw); - - std::vector tmp(tensorData.size()); - armnnUtils::Permute(tensorInfo.GetShape(), nhwcToNchw, tensorData.data(), tmp.data(), sizeof(T)); - - tensorData = tmp; -} - -template -void PermuteTensorNdhwcToNcdhw(armnn::TensorInfo& tensorInfo, std::vector& tensorData) -{ - const armnn::PermutationVector ndhwcToNcdhw = { 0, 2, 3, 4, 1 }; - - tensorInfo = armnnUtils::Permuted(tensorInfo, ndhwcToNcdhw); - - std::vector tmp(tensorData.size()); - armnnUtils::Permute(tensorInfo.GetShape(), ndhwcToNcdhw, tensorData.data(), tmp.data(), sizeof(T)); - tensorData = tmp; -} - -template -void PermuteTensorNcdhwToNdhwc(armnn::TensorInfo& tensorInfo, std::vector& tensorData) -{ - const armnn::PermutationVector ncdhwToNdhwc = { 0, 4, 1, 2, 3 }; - - tensorInfo = armnnUtils::Permuted(tensorInfo, ncdhwToNdhwc); - - std::vector tmp(tensorData.size()); - armnnUtils::Permute(tensorInfo.GetShape(), ncdhwToNdhwc, tensorData.data(), tmp.data(), sizeof(T)); - tensorData = tmp; -} +#pragma message("backendsCommon/test/DataLayoutUtils.hpp has been deprecated, it is due for removal " \ + "in 22.08 release. 
Please use public interface include/armnnTestUtils/DataLayoutUtils.hpp") diff --git a/src/backends/backendsCommon/test/DataTypeUtils.hpp b/src/backends/backendsCommon/test/DataTypeUtils.hpp index cf97c8186c..03ee1d231a 100644 --- a/src/backends/backendsCommon/test/DataTypeUtils.hpp +++ b/src/backends/backendsCommon/test/DataTypeUtils.hpp @@ -1,45 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once +#include "../../armnnTestUtils/DataTypeUtils.hpp" -#include - - -#include - -#include - -// Utility tenmplate to convert a collection of values to the correct type -template > -std::vector ConvertToDataType(const std::vector& input, - const armnn::TensorInfo& inputTensorInfo) -{ - std::vector output(input.size()); - auto outputTensorInfo = inputTensorInfo; - outputTensorInfo.SetDataType(ArmnnType); - - std::unique_ptr> pOutputEncoder = armnn::MakeEncoder(outputTensorInfo, output.data()); - armnn::Encoder& rOutputEncoder = *pOutputEncoder; - - for (auto it = input.begin(); it != input.end(); ++it) - { - rOutputEncoder.Set(*it); - ++rOutputEncoder; - } - return output; -} - -// Utility tenmplate to convert a single value to the correct type -template -T ConvertToDataType(const float& value, - const armnn::TensorInfo& tensorInfo) -{ - std::vector output(1); - std::unique_ptr> pEncoder = armnn::MakeEncoder(tensorInfo, output.data()); - armnn::Encoder& rEncoder = *pEncoder; - rEncoder.Set(value); - return output[0]; -} +#pragma message("backendsCommon/test/DataTypeUtils.hpp has been deprecated, it is due for removal in 22.08 release." \ + " Please use from armnnTestUtils library, /src/armnnTestUtils/DataTypeUtils.hpp) diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp index b64e618075..863d66caeb 100644 --- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp @@ -10,7 +10,7 @@ #include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp index fff4c4fab9..439c083673 100644 --- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp index c4488865a1..0f6d2c07dc 100644 --- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp index 669ce6020e..72688adcbd 100644 --- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp +++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp @@ -5,7 +5,7 @@ #include "DynamicBackendTests.hpp" -#include +#include #include diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp index 
635dc96720..3f530ccb15 100644 --- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "CommonTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 269a46077e..d326631bf3 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp index 27e5aa0229..53722e1acd 100644 --- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp index 878b6afeee..1076aa6669 100644 --- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "CommonTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp index 4c67ec2c8e..cf4294780d 100644 --- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp index e715e6b187..846aa76298 100644 --- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp @@ -12,9 +12,9 @@ #include -#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp index 579be513f3..f55a3c31ce 100644 --- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp +++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "CommonTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp index 181ecd912f..9ffa2a672c 100644 --- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp @@ -8,7 +8,7 @@ #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp index 246cb509c3..bbae229927 100644 --- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp +++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp @@ -4,7 +4,7 @@ // -#include "CommonTestUtils.hpp" +#include #include "MockBackend.hpp" #include diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp 
b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp index 6c76da67b3..4dd6bc955d 100644 --- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp +++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "CommonTestUtils.hpp" +#include #include "MockBackend.hpp" #include "MockBackendId.hpp" diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp index 4b932c78f8..cc7974130d 100644 --- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp +++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp index c31d084b0e..b361511f6e 100644 --- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp @@ -8,7 +8,7 @@ #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp index 7c87f358d6..a01f65ed49 100644 --- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp @@ -5,7 +5,7 @@ #include "QLstmEndToEndTestImpl.hpp" -#include "CommonTestUtils.hpp" +#include #include "EndToEndTestImpl.hpp" #include diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp index d481404f92..8a535d2b8d 100644 --- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp @@ -5,7 +5,7 @@ #include "QuantizedLstmEndToEndTestImpl.hpp" -#include "CommonTestUtils.hpp" +#include #include "EndToEndTestImpl.hpp" #include @@ -15,7 +15,7 @@ #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp index 5229c47331..9dcf705874 100644 --- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "CommonTestUtils.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp index a56db44161..94d0a4debc 100644 --- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp index e3b016ee94..b868ba3f9c 100644 --- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp @@ -12,9 +12,9 @@ #include #include -#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp index 3a2af6850c..b750a7a918 100644 --- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp +++ 
b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp @@ -10,7 +10,7 @@ #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp index 8ef5ecc203..e29782f890 100644 --- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp +++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp deleted file mode 100644 index ba7208cc40..0000000000 --- a/src/backends/backendsCommon/test/TensorCopyUtils.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "TensorCopyUtils.hpp" -#include - -void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory) -{ - tensorHandle->CopyInFrom(memory); -} - -void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle) -{ - tensorHandle->CopyOutTo(memory); -} - -void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory) -{ - tensorHandle->Allocate(); - CopyDataToITensorHandle(tensorHandle, memory); -} diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.hpp b/src/backends/backendsCommon/test/TensorCopyUtils.hpp index d3c8d9056b..e0aa7a0c3c 100644 --- a/src/backends/backendsCommon/test/TensorCopyUtils.hpp +++ b/src/backends/backendsCommon/test/TensorCopyUtils.hpp @@ -1,15 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once -#include - -#include - -void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory); - -void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle); - -void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory); \ No newline at end of file +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. +#include \ No newline at end of file diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp index 8f10869088..d1b6945d6f 100644 --- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp index a19d12f1cc..ee632ff41b 100644 --- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp +++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "WorkloadTestUtils.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp index 3173561a94..cb605af2d3 100644 --- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp +++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp @@ -1,113 +1,9 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. 
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once -#include - -#include -#include -#include -#include - -namespace armnn -{ -class ITensorHandle; -} // namespace armnn - -namespace -{ - -template -void AddInputToWorkload(QueueDescriptor& descriptor, - armnn::WorkloadInfo& info, - const armnn::TensorInfo& tensorInfo, - armnn::ITensorHandle* tensorHandle) -{ - descriptor.m_Inputs.push_back(tensorHandle); - info.m_InputTensorInfos.push_back(tensorInfo); -} - -template -void AddOutputToWorkload(QueueDescriptor& descriptor, - armnn::WorkloadInfo& info, - const armnn::TensorInfo& tensorInfo, - armnn::ITensorHandle* tensorHandle) -{ - descriptor.m_Outputs.push_back(tensorHandle); - info.m_OutputTensorInfos.push_back(tensorInfo); -} - -template -void SetWorkloadInput(QueueDescriptor& descriptor, - armnn::WorkloadInfo& info, - unsigned int index, - const armnn::TensorInfo& tensorInfo, - armnn::ITensorHandle* tensorHandle) -{ - descriptor.m_Inputs[index] = tensorHandle; - info.m_InputTensorInfos[index] = tensorInfo; -} - -template -void SetWorkloadOutput(QueueDescriptor& descriptor, - armnn::WorkloadInfo& info, - unsigned int index, - const armnn::TensorInfo& tensorInfo, - armnn::ITensorHandle* tensorHandle) -{ - descriptor.m_Outputs[index] = tensorHandle; - info.m_OutputTensorInfos[index] = tensorInfo; -} - -inline void ExecuteWorkload(armnn::IWorkload& workload, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - bool memoryManagementRequested = true) -{ - const bool manageMemory = memoryManager && memoryManagementRequested; - - // Acquire working memory (if needed) - if (manageMemory) - { - memoryManager->Acquire(); - } - - // Perform PostAllocationConfiguration - workload.PostAllocationConfigure(); - - // Execute the workload - workload.Execute(); - - // Release working memory (if needed) - if (manageMemory) - { - memoryManager->Release(); - } -} - -inline armnn::Optional GetBiasTypeFromWeightsType(armnn::Optional weightsType) -{ - if (!weightsType) - { - return weightsType; - } - - switch(weightsType.value()) - { - case armnn::DataType::BFloat16: - case armnn::DataType::Float16: - case armnn::DataType::Float32: - return weightsType; - case armnn::DataType::QAsymmS8: - case armnn::DataType::QAsymmU8: - case armnn::DataType::QSymmS8: - case armnn::DataType::QSymmS16: - return armnn::DataType::Signed32; - default: - ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type."); - } - return armnn::EmptyOptional(); -} - -} // anonymous namespace +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. 
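Illustrative usage, a minimal sketch and not part of the original patch: because the old header now forwards to armnnTestUtils, existing layer-test code that drives a workload by hand keeps compiling unchanged. The sketch assumes the factory, memory manager, tensor infos, tensor handles and data buffers are provided by the surrounding test.

    // Sketch only: workloadFactory, memoryManager, inputTensorInfo/outputTensorInfo,
    // inputHandle/outputHandle and inputValues/outputValues are assumed test fixtures.
    armnn::ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    auto workload = workloadFactory.CreateActivation(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(outputValues.data(), outputHandle.get());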
+#include "../../../armnnTestUtils/WorkloadTestUtils.hpp" diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp index 44dd9cea9d..7f2d1be972 100644 --- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp index 54052073a9..5ec8e13430 100644 --- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp @@ -9,13 +9,13 @@ #include #include -#include -#include +#include +#include #include #include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp index a65d97cdce..9e8b3a2b29 100644 --- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp index b6d6e4ad43..a66d474f22 100644 --- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp index 34b2539c32..4f82b599f2 100644 --- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp @@ -6,11 +6,11 @@ #include "ArgMinMaxTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp index 2e7a54b6af..941470590a 100644 --- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp index 4311faff4e..90bc8d76c1 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp @@ -16,10 +16,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp index f57c061f57..11bc2973a2 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git 
a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp index 3669281d48..19d472bb0f 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp @@ -5,19 +5,16 @@ #pragma once -#include "LayerTestResult.hpp" - #include - #include #include -#include -#include -#include - -#include +#include +#include +#include +#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp index bf8d5a4e24..556909860e 100644 --- a/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp index b026b4e7e0..598f205694 100644 --- a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp @@ -5,9 +5,9 @@ #include "ChannelShuffleTestImpl.hpp" -#include -#include -#include +#include +#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp index 3500e72ae7..9c5f40d550 100644 --- a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp index 68bc588860..5eccd011f6 100644 --- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp @@ -13,10 +13,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp index 301241785b..8a920e5f5f 100644 --- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp index 3eca27364d..52387298c6 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp @@ -11,10 +11,10 @@ #include -#include -#include +#include +#include -#include +#include using namespace armnn; using namespace armnnUtils; diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp index 64e0c0a722..c91ee86cf9 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git 
a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp index bb827ef359..dd339badb2 100644 --- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp @@ -13,10 +13,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp index 71aacb5e62..34491b1c76 100644 --- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp index 99f1436c98..61e000a891 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp @@ -15,11 +15,11 @@ #include -#include -#include -#include +#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp index 1f54034703..f54a6f85f5 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp index a592ea3f31..4adc6ef63f 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp @@ -11,11 +11,11 @@ #include -#include -#include -#include +#include +#include +#include -#include +#include using namespace armnnUtils; diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp index c612e19c9b..127e7ef883 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp index b16ce47c8f..7699daa21d 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp @@ -5,10 +5,10 @@ #include "ConvertBf16ToFp32TestImpl.hpp" -#include -#include +#include +#include -#include +#include LayerTestResult ConvertBf16ToFp32Test( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp index 08f4c04074..db92d42aca 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git 
a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp index 177acef772..2c1f9b9407 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp @@ -8,10 +8,10 @@ #include -#include -#include +#include +#include -#include +#include LayerTestResult SimpleConvertFp16ToFp32Test( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp index 8eefb77892..9e64cdd823 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp index 9ab3746b61..14a75c13de 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp @@ -5,10 +5,10 @@ #include "ConvertFp32ToBf16TestImpl.hpp" -#include -#include +#include +#include -#include +#include LayerTestResult ConvertFp32ToBf16Test( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp index 9e1da65c2e..737181def1 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp index 9946801aab..8210b2d2d1 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp @@ -6,10 +6,10 @@ #include "ConvertFp32ToFp16TestImpl.hpp" -#include -#include +#include +#include -#include +#include LayerTestResult SimpleConvertFp32ToFp16Test( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp index 39dc8a4d4d..8b6617c8ef 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp index 0539cd1470..5475dbfae7 100644 --- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp @@ -9,10 +9,10 @@ #include -#include -#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.hpp index cf4b237d27..beab583cab 100644 
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp index 7495c6b5b3..be88e77456 100644 --- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp @@ -8,11 +8,11 @@ #include -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.hpp index 18797c66dc..c6781a99c3 100644 --- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp index 924844d92f..61fb4078c4 100644 --- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp @@ -8,10 +8,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp index 1e079a75bf..8f120d3b03 100644 --- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp index 2472c342ea..b9f06deaa1 100644 --- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp @@ -12,11 +12,11 @@ #include #include -#include +#include #include -#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp index 41467466b2..b5d04e5f43 100644 --- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp index 88f34f6add..3175aaf4c3 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include @@ -15,11 +15,11 @@ #include #include -#include -#include -#include +#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp index 
20e341b4e2..a15add049f 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include @@ -16,11 +16,11 @@ #include #include -#include -#include -#include +#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ExpTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ExpTestImpl.hpp index 91cb669737..c7008a744f 100644 --- a/src/backends/backendsCommon/test/layerTests/ExpTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ExpTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp index bbe481657d..f433f9dd17 100644 --- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp @@ -8,10 +8,10 @@ #include -#include -#include +#include +#include -#include +#include LayerTestResult FakeQuantizationTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp index 519880e92a..d8af8c561e 100644 --- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp index 9208a311a7..41fcf59ba8 100644 --- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp @@ -5,11 +5,11 @@ #include "FillTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include template LayerTestResult SimpleFillTest( diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp index 0eaffd14b6..beaf35c050 100644 --- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp index bf871ae2f4..c05e1d833e 100644 --- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp @@ -5,11 +5,11 @@ #include "FloorTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include template LayerTestResult SimpleFloorTest( diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.hpp index 78211c6c15..ff25252d14 100644 --- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git 
a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp index dcf87fe92b..59e67febdf 100644 --- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp @@ -10,11 +10,11 @@ #include -#include -#include -#include +#include +#include +#include -#include +#include // // Implementation templates diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp index ec921f7dd5..76cea90c04 100644 --- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp index 51df1eb847..edcc900f5e 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp @@ -8,10 +8,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp index 8c37f92f42..363478dd30 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp index ed656daa02..da9608c122 100644 --- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp @@ -13,11 +13,11 @@ #include #include -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp index d28069a8ef..be771441a2 100644 --- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp index 11c9766604..67f1c3c221 100644 --- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp @@ -11,10 +11,10 @@ #include #include -#include -#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp index 137ab7e8f6..283a25b187 100644 --- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include 
#include diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp index ac60764964..e0054cad63 100644 --- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp +++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp @@ -1,62 +1,15 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // -#pragma once +// This file is deprecated and will be removed soon. +// Please use the new header in armnnTestUtils instead. +// This will use the new armnnTestUtils header. +#include -#include -#include - -#include -#include - -template -struct LayerTestResult -{ - LayerTestResult(const armnn::TensorInfo& outputInfo) - : m_Supported(true) - , m_CompareBoolean(false) - { - m_ActualData.reserve(outputInfo.GetNumElements()); - m_ExpectedData.reserve(outputInfo.GetNumElements()); - m_ActualShape = outputInfo.GetShape(); - m_ExpectedShape = outputInfo.GetShape(); - } - - LayerTestResult(const std::vector& actualData, - const std::vector& expectedData, - const armnn::TensorShape& actualShape, - const armnn::TensorShape& expectedShape) - : m_ActualData(actualData) - , m_ExpectedData(expectedData) - , m_ActualShape(actualShape) - , m_ExpectedShape(expectedShape) - , m_Supported(true) - , m_CompareBoolean(false) - {} - - LayerTestResult(const std::vector& actualData, - const std::vector& expectedData, - const armnn::TensorShape& actualShape, - const armnn::TensorShape& expectedShape, - const bool compareBoolean) - : m_ActualData(actualData) - , m_ExpectedData(expectedData) - , m_ActualShape(actualShape) - , m_ExpectedShape(expectedShape) - , m_Supported(true) - , m_CompareBoolean(compareBoolean) - {} - - std::vector m_ActualData; - std::vector m_ExpectedData; - armnn::TensorShape m_ActualShape; - armnn::TensorShape m_ExpectedShape; - - bool m_Supported; - bool m_CompareBoolean; -}; +#pragma message("backendsCommon/test/layerTests/LayerTestResult.hpp has been deprecated, it is due for " \ + "removal in 22.08 release. 
Please use public interface include/armnnTestUtils/LayerTestResult.hpp") diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp index ad23f8f380..94f5a5be1c 100644 --- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp @@ -14,10 +14,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp index 1f4cc8947c..b293337554 100644 --- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp index e7e14b89d1..cf9878f592 100644 --- a/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/LogTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp index 119e76bda9..a2ce5af2f3 100644 --- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp @@ -11,10 +11,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp index 1711d90d5a..b81d2f38a1 100644 --- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp index 035c592738..56bc23cf9c 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp @@ -11,14 +11,14 @@ #include -#include -#include +#include +#include #include #include #include -#include +#include #include namespace diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp index d27ddd6920..62bb125519 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp index 8cc660b76f..c13059b445 100644 --- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp index 0f045d1198..9cc45e2e69 100644 
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp index 1e84191908..bd60b20af0 100644 --- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp index 61899db00e..60fbfb0548 100644 --- a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp @@ -7,10 +7,10 @@ #include -#include -#include +#include +#include -#include +#include // // Implementation templates diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp index 52898b820c..60475fdeb8 100644 --- a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp index 9d2a95409d..72154dbc33 100644 --- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp index 126a754335..0296ca2993 100644 --- a/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp index 153afd9cd7..e3a3bea798 100644 --- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp @@ -12,10 +12,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp index bbbbc4fe02..30cd57ca05 100644 --- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp index a09e387b0e..628eed04b0 100644 --- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp @@ -7,10 +7,10 @@ #include -#include -#include +#include +#include -#include +#include // // 
Implementation templates diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp index 4c30c427cb..1d19aa84ff 100644 --- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp index 91add545ec..26fb5044b1 100644 --- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp @@ -11,9 +11,9 @@ #include #include -#include +#include -#include +#include template LayerTestResult SimplePermuteTestImpl( diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp index 1eaf1f9d66..7bb0e59547 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp @@ -19,10 +19,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.hpp index bf2c39e9a3..0a25339ff5 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp index 96a56fd9f0..ad438eaf6e 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp @@ -19,10 +19,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.hpp index e7cd6b4577..6c1d5defff 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp index 3cf85817c8..6b9aaed742 100644 --- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include @@ -14,11 +14,11 @@ #include #include -#include +#include #include -#include +#include -#include +#include template> LayerTestResult PreluTest( diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp index 029d50e718..b593620326 100644 --- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp @@ -11,10 +11,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git 
a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp index 9e2f3dfe28..967155061d 100644 --- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp index c483d2cdc6..c04d7b2e82 100644 --- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp @@ -5,11 +5,11 @@ #include "RankTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include template LayerTestResult RankTest( diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp index 0aacee1aa5..27b0fcc609 100644 --- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp index 4fb0732141..b93eb55104 100644 --- a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp @@ -5,11 +5,11 @@ #include "ReduceProdTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp index 97e94978f7..4d7ddde92c 100644 --- a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp index 9f5422bcbc..c50fe75394 100644 --- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp @@ -5,11 +5,11 @@ #include "ReduceSumTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp index db23240958..a5249b41da 100644 --- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp index 7ce03ad13a..a69afb8438 100644 --- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp @@ -5,11 +5,11 @@ #include "ReductionTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include #include diff --git 
a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.hpp index 495a74b64f..14809353d7 100644 --- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp index c3aacad4b0..ccf234588a 100644 --- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp @@ -5,11 +5,11 @@ #include "ReshapeTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.hpp index a29a965fdc..c692c95bc7 100644 --- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp index 7706bde60d..aa5bbae8d7 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp @@ -12,11 +12,11 @@ #include #include -#include -#include -#include +#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp index ce7d41910c..bbbe861658 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.hpp index 0df9ea7999..33d965bdac 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp index d6c03141ab..3ebb5236bc 100644 --- a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp @@ -5,11 +5,11 @@ #include "ShapeTestImpl.hpp" -#include -#include -#include +#include +#include +#include -#include +#include template LayerTestResult ShapeTest( diff --git a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.hpp index 85f7c0a453..8b95aa50c3 100644 --- a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp index b04d75a255..ee7bcb94ac 100644 --- 
a/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SinTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp index f3e28363c2..ddf216dec0 100644 --- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp @@ -9,10 +9,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp index d308268acd..c4d62ccd71 100644 --- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp index 375bdaa130..05c4784bfb 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp @@ -11,10 +11,10 @@ #include -#include -#include +#include +#include -#include +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp index f0efe4d233..9f93c025b3 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp index 44a37f4fe8..69f5f5aae4 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp @@ -11,10 +11,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp index 69ee99bea7..1f446b7b41 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp index 9175aec8c6..d8c5747917 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp @@ -11,10 +11,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp index 29f2646816..5a3e4934ef 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp @@ -4,7 +4,7 @@ // #pragma once -#include "LayerTestResult.hpp" 
+#include #include diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp index e19a3216c3..bf95a9fec8 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp @@ -9,10 +9,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp index 400720088c..dc76bc9233 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp index 25989f90ed..2a0e049937 100644 --- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp @@ -4,7 +4,7 @@ // #include "StackTestImpl.hpp" -#include "LayerTestResult.hpp" +#include #include @@ -12,10 +12,10 @@ #include #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp index 75e9ae82d5..24e88c4f24 100644 --- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp index af4b089cde..72ba681c7d 100644 --- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp @@ -9,10 +9,10 @@ #include -#include -#include +#include +#include -#include +#include namespace { diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp index 52feb0c01a..3806d33cae 100644 --- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp index 6113b029b8..eba0d0ab85 100644 --- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp @@ -5,7 +5,7 @@ #pragma once -#include "LayerTestResult.hpp" +#include #include diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp index dae7483011..34abc86400 100644 --- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp @@ -12,13 +12,13 @@ #include -#include -#include -#include +#include +#include +#include #include -#include +#include 
 #include
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
index 0c45b0fb9b..6af9e32dec 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
@@ -5,7 +5,7 @@
 #pragma once
-#include "LayerTestResult.hpp"
+#include
 #include
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index 6be8bcb5cb..dceb386b31 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -12,9 +12,9 @@
 #include
 #include
-#include
+#include
-#include
+#include
 template
 LayerTestResult SimpleTransposeTestImpl(
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
index d17dceb3f6..5315dd3685 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
@@ -9,8 +9,8 @@
 #include
-#include
-#include
+#include
+#include
 #include
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
index 20ac3135a4..88b09b9606 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp
@@ -5,7 +5,7 @@
 #pragma once
-#include "LayerTestResult.hpp"
+#include
 #include
 #include
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index 8ee532a323..af116a4e9f 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -37,6 +37,7 @@ endif()
 add_library(armnnClBackendUnitTests OBJECT ${armnnClBackendUnitTests_sources})
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils)
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 4e403283e7..dd53f38382 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -10,8 +10,8 @@
 #include
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index cfe2b369ac..6ac94337ba 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -3,9 +3,9 @@
 // SPDX-License-Identifier: MIT
 //
-#include
+#include
-#include
+#include
 #include
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index b18da11176..1747cb6763 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 6c27d51853..967a7e446c 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -6,8 +6,8 @@
 #include "ClContextControlFixture.hpp"
 #include "ClWorkloadFactoryHelper.hpp"
-#include "test/TensorHelpers.hpp"
-#include "test/UnitTests.hpp"
+#include
+#include
 #include
 #include
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 4c2a474526..cf17eae208 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -7,7 +7,7 @@
 #include
-#include
+#include
 #include
 #include
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 0da1db73b8..85fdc81a94 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -7,7 +7,7 @@
 #include "ClWorkloadFactoryHelper.hpp"
-#include
+#include
 #include
 #include
@@ -16,8 +16,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 9a45b7ddbe..e4c5b34532 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -53,6 +53,7 @@ endif()
 add_library(armnnNeonBackendUnitTests OBJECT ${armnnNeonBackendUnitTests_sources})
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils)
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index ae6cfae3fa..d2de843fd9 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -3,10 +3,10 @@
 // SPDX-License-Identifier: MIT
 //
-#include
+#include
 #include
-#include
+#include
 #include
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 494c8f927f..fbb91a96c7 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -7,7 +7,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1750f24853..3b63a88457 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -5,8 +5,8 @@
 #include "NeonWorkloadFactoryHelper.hpp"
-#include
-#include
+#include
+#include
 #include
 #include
diff --git a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
index 5a65b155ef..24109605cc 100644
--- a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
+++ b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
@@ -5,8 +5,9 @@
 #include "NeonWorkloadFactoryHelper.hpp"
+#include
+#include
 #include
-#include
 #include
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 685a0744e7..9d69a5c2d0 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -11,9 +11,9 @@
 #include
 #include
-#include
+#include
 #include
-#include
+#include
 #include
 #include
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index 87e15679df..3596dfa87c 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -6,7 +6,7 @@
 #include "NeonWorkloadFactoryHelper.hpp"
-#include
+#include
 #include
 #include
@@ -15,8 +15,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index d7c5da896a..d5ce3553f1 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -23,6 +23,7 @@ list(APPEND armnnRefBackendUnitTests_sources
 add_library(armnnRefBackendUnitTests OBJECT ${armnnRefBackendUnitTests_sources})
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnTestUtils)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index fae8d0cdd4..e865f25f49 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
-#include
+#include
 #include
 #include
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 1adc54e990..f0eb62f12c 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -5,7 +5,8 @@
 #include
 #include
-#include
+
+#include
 #include
 #include
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 13487dd53f..9f5814b395 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -9,7 +9,7 @@
 #include
-#include
+#include
 TEST_SUITE("Compute_Reference")
 {
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 578d667983..5b128a1f6c 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -7,7 +7,7 @@
 #include
 #include
-#include
+#include
 #include
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
index 6571715c63..9f8514d009 100644
--- a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
@@ -3,7 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
-#include
 #include
 #include
 #include "RefChannelShuffleWorkload.hpp"
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index e0d3dd717c..51f27d4387 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -16,7 +16,7 @@
 #include
-#include
+#include
 #include
-- 
cgit v1.2.1