diff options
| author | Cathal Corbett <cathal.corbett@arm.com> | 2023-01-12 11:17:03 +0000 |
|---|---|---|
| committer | Cathal Corbett <cathal.corbett@arm.com> | 2023-01-12 11:18:21 +0000 |
| commit | d69c1c595375b904a7f19f562ac1d54098184b4e (patch) | |
| tree | b2c4980eb367aa160282aae5c2deda8ef19682de /src/backends/backendsCommon | |
| parent | 267c985a6322fbc1efa22ba44188ac867537f1b1 (diff) | |
| download | armnn-d69c1c595375b904a7f19f562ac1d54098184b4e.tar.gz | |
Merge 'main' onto 'experimental/GpuFsa'.
* I6c71be11e9b73694747b27fe9febab8d9669b4d4
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Iccaf50e2484559979d801ee9d0e130e848554733
Diffstat (limited to 'src/backends/backendsCommon')
6 files changed, 210 insertions, 10 deletions
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt index b2ab932384..8d7e114fa5 100644 --- a/src/backends/backendsCommon/CMakeLists.txt +++ b/src/backends/backendsCommon/CMakeLists.txt @@ -1,9 +1,9 @@ # -# Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +# Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved. # SPDX-License-Identifier: MIT # -if(NOT BUILD_BARE_METAL) +if(NOT BUILD_BARE_METAL AND NOT EXECUTE_NETWORK_STATIC) list(APPEND armnnBackendsCommon_sources DynamicBackend.cpp DynamicBackend.hpp diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp index d49fa7f2ec..9041b55c57 100644 --- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp +++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -169,11 +169,6 @@ TEST_CASE("BackendProfilingCounterRegisterMockBackendTest") unsigned int shiftedId = 0; - if (armnn::BackendRegistryInstance().IsBackendRegistered("EthosNAcc")) - { - shiftedId = 4; - } - // Check if the MockBackends 3 dummy counters {0, 1, 2-5 (four cores)} are registered armnn::BackendId mockId = armnn::MockBackendId(); const ICounterMappings& counterMap = GetProfilingService(&runtime).GetCounterMappings(); diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 5fcc8b592e..d251bd2597 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -1,5 +1,5 @@ # -# Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved. +# Copyright © 2017-2022 Arm Ltd and Contributors. All rights reserved. 
# SPDX-License-Identifier: MIT # @@ -41,6 +41,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources LogSoftmaxEndToEndTestImpl.hpp MemoryManagerTests.cpp MockBackendId.hpp + MultiplicationEndToEndTestImpl.hpp OptimizeSubgraphViewTests.cpp OptimizationViewsTests.cpp PreluEndToEndTestImpl.hpp @@ -57,6 +58,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources SpaceToDepthEndToEndTestImpl.hpp SplitterEndToEndTestImpl.hpp StridedSliceAsyncEndToEndTest.hpp + SubtractionEndToEndTestImpl.hpp TransposeEndToEndTestImpl.hpp TensorCopyUtils.hpp WorkloadFactoryHelper.hpp diff --git a/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp new file mode 100644 index 0000000000..40442e2d47 --- /dev/null +++ b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp @@ -0,0 +1,96 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include <armnn/INetwork.hpp> + +#include <CommonTestUtils.hpp> +#include <ResolveType.hpp> + +#include <doctest/doctest.h> + +namespace +{ + +template<typename armnn::DataType DataType> +armnn::INetworkPtr CreateMultiplicationNetwork(const armnn::TensorShape& inputXShape, + const armnn::TensorShape& inputYShape, + const armnn::TensorShape& outputShape, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true); + TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true); + + TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + + + IConnectableLayer* multiplication = network->AddMultiplicationLayer("multiplication"); + IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); + IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); + IConnectableLayer* output = network->AddOutputLayer(0, 
"output"); + + Connect(inputX, multiplication, inputXTensorInfo, 0, 0); + Connect(inputY, multiplication, inputYTensorInfo, 0, 1); + Connect(multiplication, output, outputTensorInfo, 0, 0); + + return network; +} + +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +void MultiplicationEndToEnd(const std::vector<armnn::BackendId>& backends) +{ + using namespace armnn; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateMultiplicationNetwork<ArmnnType>(inputXShape, inputYShape, outputShape); + + CHECK(network); + + std::vector<T> inputXData{ 1, 2, 3, 4 }; + std::vector<T> inputYData{ 5, 2, 6, 3 }; + std::vector<T> expectedOutput{ 5, 4, 18, 12 }; + + std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, {1, inputYData}}; + std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends); +} + +template<armnn::DataType ArmnnType> +void MultiplicationEndToEndFloat16(const std::vector<armnn::BackendId>& backends) +{ + using namespace armnn; + using namespace half_float::literal; + using Half = half_float::half; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateMultiplicationNetwork<ArmnnType>(inputXShape, inputYShape, outputShape); + CHECK(network); + + std::vector<Half> inputXData{ 1._h, 2._h, + 3._h, 4._h }; + std::vector<Half> inputYData{ 1._h, 2._h, + 3._h, 4._h }; + std::vector<Half> expectedOutput{ 1._h, 4._h, + 9._h, 16._h }; + + std::map<int, std::vector<Half>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }}; + std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl<ArmnnType, 
ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp index 9b86784dce..ff84eea2de 100644 --- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp +++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017, 2019-2022 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -288,4 +288,15 @@ TEST_CASE("OptimizeViewsValidateDeviceMockBackend") CheckLayers(graph); } +TEST_CASE("OptimizedViewsReturnsINetworkReference") +{ + OptimizationViews view; + + auto layer = view.GetINetworkRef().AddInputLayer(0, "input"); + + // Check layer has been added to the referenced INetwork + CHECK(layer); +} + + }
\ No newline at end of file diff --git a/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp new file mode 100644 index 0000000000..747fe26df0 --- /dev/null +++ b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp @@ -0,0 +1,96 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include <armnn/INetwork.hpp> + +#include <CommonTestUtils.hpp> +#include <ResolveType.hpp> + +#include <doctest/doctest.h> + +namespace +{ + +template<typename armnn::DataType DataType> +armnn::INetworkPtr CreateSubtractionNetwork(const armnn::TensorShape& inputXShape, + const armnn::TensorShape& inputYShape, + const armnn::TensorShape& outputShape, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true); + TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true); + + TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + + + IConnectableLayer* subtraction = network->AddSubtractionLayer("subtraction"); + IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); + IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(inputX, subtraction, inputXTensorInfo, 0, 0); + Connect(inputY, subtraction, inputYTensorInfo, 0, 1); + Connect(subtraction, output, outputTensorInfo, 0, 0); + + return network; +} + +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +void SubtractionEndToEnd(const std::vector<armnn::BackendId>& backends) +{ + using namespace armnn; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = 
CreateSubtractionNetwork<ArmnnType>(inputXShape, inputYShape, outputShape); + + CHECK(network); + + std::vector<T> inputXData{ 10, 11, 12, 13 }; + std::vector<T> inputYData{ 5, 7, 6, 8 }; + std::vector<T> expectedOutput{ 5, 4, 6, 5 }; + + std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, {1, inputYData}}; + std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends); +} + +template<armnn::DataType ArmnnType> +void SubtractionEndToEndFloat16(const std::vector<armnn::BackendId>& backends) +{ + using namespace armnn; + using namespace half_float::literal; + using Half = half_float::half; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateSubtractionNetwork<ArmnnType>(inputXShape, inputYShape, outputShape); + CHECK(network); + + std::vector<Half> inputXData{ 11._h, 12._h, + 13._h, 14._h }; + std::vector<Half> inputYData{ 5._h, 7._h, + 6._h, 8._h }; + std::vector<Half> expectedOutput{ 6._h, 5._h, + 7._h, 6._h }; + + std::map<int, std::vector<Half>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }}; + std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace