aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2023-12-04 17:23:09 +0000
committerNikhil Raj Arm <nikhil.raj@arm.com>2023-12-05 16:58:10 +0000
commita9c3267d1a20e69a9cc0ae98b52958a6277e2f0d (patch)
treecf8ba49f42bcdb6d8360ca705b247bdd656925f8
parenta8337d7a1e9f5aa3ed380dd0f5a4cf7636360122 (diff)
downloadarmnn-a9c3267d1a20e69a9cc0ae98b52958a6277e2f0d.tar.gz
IVGCVSW-8159 Fixed issues building with NDK r26
* The compiler shipped with NDK r26 has stricter rules around certain warnings and deprecation notices.
* Fixed warnings for unqualified call to 'std::move'
* Fixed error where the half values weren't being cast to a float when calling 'std::nan'
* Removed unnecessary subtensor unit tests for neon

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4ceb46e55ff5f2a754452e3a43de2188d58bf927
-rw-r--r--include/armnn/utility/TransformIterator.hpp5
-rw-r--r--include/armnnTestUtils/TensorHelpers.hpp2
-rw-r--r--shim/sl/canonical/ArmnnDriverImpl.cpp4
-rw-r--r--src/armnn/ExecutionFrame.cpp2
-rw-r--r--src/armnn/ExecutionFrame.hpp2
-rw-r--r--src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp2
-rw-r--r--src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp4
-rw-r--r--src/armnnUtils/ParserPrototxtFixture.hpp8
-rw-r--r--src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp6
-rw-r--r--src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp8
-rw-r--r--src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp8
-rw-r--r--src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp6
-rw-r--r--src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp6
-rw-r--r--src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp4
-rw-r--r--src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp4
-rw-r--r--src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp4
-rw-r--r--src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp22
-rw-r--r--src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp6
-rw-r--r--src/backends/neon/test/NeonTensorHandleTests.cpp260
26 files changed, 66 insertions, 325 deletions
diff --git a/include/armnn/utility/TransformIterator.hpp b/include/armnn/utility/TransformIterator.hpp
index b038447f36..e37c8a7155 100644
--- a/include/armnn/utility/TransformIterator.hpp
+++ b/include/armnn/utility/TransformIterator.hpp
@@ -8,7 +8,6 @@
namespace armnn
{
-
template<typename Function,
typename Iterator,
typename Category = typename std::iterator_traits<Iterator>::iterator_category,
@@ -16,7 +15,7 @@ template<typename Function,
typename Distance = typename std::iterator_traits<Iterator>::difference_type,
typename Pointer = typename std::iterator_traits<Iterator>::pointer,
typename Reference =
- typename std::result_of<const Function(typename std::iterator_traits<Iterator>::reference)>::type
+ typename std::invoke_result<const Function, typename std::iterator_traits<Iterator>::reference>::type
>
class TransformIterator
{
@@ -73,7 +72,7 @@ public:
bool operator<=(const TransformIterator& rhs) const {return m_it <= rhs.m_it;}
bool operator==(TransformIterator other) const {return (m_it == other.m_it);}
- bool operator!=(TransformIterator other) const {return !(m_it == other.m_it);}
+ bool operator!=(TransformIterator other) const {return (m_it != other.m_it);}
Reference operator*() const {return m_fn(*m_it);}
diff --git a/include/armnnTestUtils/TensorHelpers.hpp b/include/armnnTestUtils/TensorHelpers.hpp
index fa9c97032c..14c5061334 100644
--- a/include/armnnTestUtils/TensorHelpers.hpp
+++ b/include/armnnTestUtils/TensorHelpers.hpp
@@ -47,7 +47,7 @@ struct SelectiveComparer<T, false>
return true;
}
- if (std::isnan(a) && std::isnan(b))
+ if (std::isnan(static_cast<float>(a)) && std::isnan(static_cast<float>(b)))
{
return true;
}
diff --git a/shim/sl/canonical/ArmnnDriverImpl.cpp b/shim/sl/canonical/ArmnnDriverImpl.cpp
index 060dd5a252..0063149adb 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.cpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.cpp
@@ -233,7 +233,7 @@ GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModel(
auto numOutputs = getMainModel(model).outputIndexes.size();
try
{
- if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
+ if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
{
return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
}
@@ -530,7 +530,7 @@ GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModelFromCache(
options.IsGpuProfilingEnabled());
try
{
- if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
+ if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
{
return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
}
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 92a7990881..118fa7ead8 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -39,7 +39,7 @@ void ExecutionFrame::RegisterDebugCallback(const DebugCallbackFunction& func)
void ExecutionFrame::AddWorkloadToQueue(std::unique_ptr<IWorkload> workload)
{
- m_WorkloadQueue.push_back(move(workload));
+ m_WorkloadQueue.push_back(std::move(workload));
}
void ExecutionFrame::SetNextExecutionFrame(IExecutionFrame* nextExecutionFrame)
diff --git a/src/armnn/ExecutionFrame.hpp b/src/armnn/ExecutionFrame.hpp
index 20a5da0256..3f2407b202 100644
--- a/src/armnn/ExecutionFrame.hpp
+++ b/src/armnn/ExecutionFrame.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 0b717bc0fd..415ffebf58 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -66,7 +66,7 @@ struct ParserFlatbuffersSerializeFixture
m_Runtime->GetDeviceSpec());
std::string errorMessage;
- armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+ armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 9e98774ada..a3fb3d0168 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -135,7 +135,7 @@ struct ParserFlatbuffersFixture
m_Runtime->GetDeviceSpec());
std::string errorMessage;
- armnn::Status ret = m_Runtime->LoadNetwork(networkId, move(optimized), errorMessage);
+ armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index ccb99be594..a12a66ea25 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -127,7 +127,7 @@ void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::Te
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
- armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+ armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
@@ -144,7 +144,7 @@ void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::Te
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes);
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
- armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+ armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
@@ -161,7 +161,7 @@ void ParserPrototxtFixture<TParser>::Setup()
armnn::INetworkPtr network =
m_Parser->CreateNetworkFromString(m_Prototext.c_str());
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
- armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+ armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
index 10e8363c7f..c6d49b1df6 100644
--- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -106,7 +106,7 @@ void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
inputTensorData,
expectedOutputTensorData,
backends,
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index 87fccd8ca8..1936af64f3 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -72,7 +72,7 @@ void BatchToSpaceNdEndToEnd(const std::vector<BackendId>& backends, armnn::DataL
std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -113,7 +113,7 @@ void BatchToSpaceNdComplexEndToEnd(const std::vector<BackendId>& backends, armnn
std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
index 27907f1df3..9e8a42d7b3 100644
--- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,7 +65,7 @@ void ChannelShuffleEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index 4bdf3f8bee..e2a0d669f3 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -72,7 +72,8 @@ void ComparisonSimpleEndToEnd(const std::vector<BackendId>& backends,
std::map<int, std::vector<TInput>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
std::map<int, std::vector<uint8_t>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(std::move(net), inputTensorData, expectedOutputData,
+ backends);
}
template<armnn::DataType ArmnnInType,
@@ -97,7 +98,8 @@ void ComparisonBroadcastEndToEnd(const std::vector<BackendId>& backends,
std::map<int, std::vector<TInput>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
std::map<int, std::vector<uint8_t>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnInType, DataType::Boolean>(std::move(net), inputTensorData, expectedOutputData,
+ backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index 439c083673..82fceb81aa 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -54,7 +54,7 @@ void DequantizeEndToEndLayerTestImpl(const std::vector<BackendId>& backends,
std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
- move(net), inputTensorData, expectedOutputData, backends);
+ std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index 0f6d2c07dc..9d6c0baa0b 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -103,7 +103,7 @@ void DetectionPostProcessEndToEnd(const std::vector<BackendId>& backends, bool u
{ 3, expectedNumDetections }};
EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
- move(net), inputTensorData, expectedOutputData, backends);
+ std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 53722e1acd..c3d031cb90 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -62,7 +62,7 @@ void FillEndToEnd(const std::vector<armnn::BackendId>& backends)
std::map<int, std::vector<int32_t>> inputTensorData = {{ 0, inputData }};
std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
- EndToEndLayerTestImpl<DataType::Signed32, ArmnnType>(move(network),
+ EndToEndLayerTestImpl<DataType::Signed32, ArmnnType>(std::move(network),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 0d2d2cb2de..a65f3b4b98 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -210,7 +210,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, {1, weightsData}};
std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
inputTensorData,
expectedOutputTensorData,
backends,
@@ -305,7 +305,7 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
std::map<int, std::vector<T>> inputTensorData = {{ 0, input }, {1, weights}};
std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
inputTensorData,
expectedOutputTensorData,
backends,
@@ -327,7 +327,7 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
std::map<int, std::vector<T>> inputTensorData = {{ 0, input }, {2, biasValues}};
std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
inputTensorData,
expectedOutputTensorData,
backends,
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index cf4294780d..45b1f39779 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -68,7 +68,7 @@ void GatherEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -124,7 +124,7 @@ void GatherMultiDimEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
index 0eea91190e..6adaa5bd70 100644
--- a/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherNdEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,7 +87,7 @@ void GatherNdEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -155,7 +155,7 @@ void GatherNdMultiDimEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index 846aa76298..7b321701dc 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,7 +87,7 @@ void InstanceNormalizationEndToEnd(const std::vector<armnn::BackendId>& backends
std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
- EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(move(net),
+ EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(std::move(net),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index 9ffa2a672c..46cd33cd76 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,7 +65,7 @@ void LogSoftmaxEndToEnd(const std::vector<armnn::BackendId>& backends,
std::map<int, std::vector<float>> inputTensorData = { {0, inputData} };
std::map<int, std::vector<float>> expectedOutputTensorData = { {0, expectedOutputData} };
- EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(move(net),
+ EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(std::move(net),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index b361511f6e..4988e745d0 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -65,7 +65,7 @@ void PreluEndToEnd(const std::vector<BackendId>& backends,
std::map<int, std::vector<T>> inputTensorData = { { 0, inputData }, { 1, alphaData} };
std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index 9dcf705874..035b2e9ddd 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,7 +57,7 @@ void RankEndToEnd(const std::vector<armnn::BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }};
std::map<int, std::vector<int32_t>> expectedOutputTensorData = {{ 0, expectedOutputData }};
- EndToEndLayerTestImpl<ArmnnType, DataType::Signed32>(move(network),
+ EndToEndLayerTestImpl<ArmnnType, DataType::Signed32>(std::move(network),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index b868ba3f9c..a1bd755b1c 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,7 +87,7 @@ void SpaceToDepthEndToEnd(const std::vector<armnn::BackendId>& backends,
std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(
- move(net),
+ std::move(net),
inputTensorData,
expectedOutputTensorData,
backends);
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index b750a7a918..da4c6a62be 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -97,7 +97,7 @@ void Splitter1dEndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 }, {1, expectedOutput1} };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -132,7 +132,7 @@ void Splitter2dDim0EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 }, {1, expectedOutput1} };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -170,7 +170,7 @@ void Splitter2dDim1EndToEnd(const std::vector<BackendId>& backends)
{ 1, expectedOutput1 },
{ 2, expectedOutput2 } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -218,7 +218,7 @@ void Splitter3dDim0EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 },
{ 1, expectedOutput1 } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -266,7 +266,7 @@ void Splitter3dDim1EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput0 },
{ 1, expectedOutput1 } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -306,7 +306,7 @@ void Splitter3dDim2EndToEnd(const std::vector<BackendId>& backends)
{ 1, expectedOutput1 },
{ 2, expectedOutput2 } };
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -386,7 +386,7 @@ void Splitter4dDim0EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -466,7 +466,7 @@ void Splitter4dDim1EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>
@@ -546,7 +546,7 @@ void Splitter4dDim2EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -613,7 +613,7 @@ void Splitter4dDim3EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index c6bfc5d92e..9ba90578c8 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -323,7 +323,7 @@ void StridedSlicedEndToEndTest(const std::vector<BackendId>& backends, size_t nu
std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
- AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net),
+ AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(std::move(net),
inputTensorData,
expectedOutputData,
backends,
@@ -392,7 +392,7 @@ void StridedSlicedMultiThreadedEndToEndTest(const std::vector<BackendId>& backen
outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected1}});
outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected2}});
- AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensors, outputTensors, backends, 2);
+ AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensors, outputTensors, backends, 2);
}
} // experimental namespace
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index a94e4dd187..bc8ad5de5a 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -89,266 +89,6 @@ TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
CHECK(capabilities[0].m_Value);
}
-TEST_CASE("ConcatonXorYPaddingRequiredTest")
-{
- armnn::INetworkPtr net(armnn::INetwork::Create());
-
- // Set up tensor infos
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
- const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4, 2}, armnn::DataType::Float32);
-
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
- descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
- descriptor.m_StrideX = descriptor.m_StrideY = 1;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
- // Create the network
- armnn::IConnectableLayer* const input0Layer = net->AddInputLayer(0, "input_0");
- input0Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- armnn::IConnectableLayer* pooling2dLayer0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
- pooling2dLayer0->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
- input0Layer->GetOutputSlot(0).Connect(pooling2dLayer0->GetInputSlot(0));
-
- armnn::IConnectableLayer* const input1Layer = net->AddInputLayer(1, "input_1");
- input1Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- armnn::IConnectableLayer* pooling2dLayer1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
- pooling2dLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
- input1Layer->GetOutputSlot(0).Connect(pooling2dLayer1->GetInputSlot(0));
-
- std::array<armnn::TensorShape, 2> concatInputShapes = { intermediateInfo.GetShape(), intermediateInfo.GetShape() };
- armnn::IConnectableLayer* const concatLayer = net->AddConcatLayer(armnn::CreateDescriptorForConcatenation(
- concatInputShapes.begin(), concatInputShapes.end(), 2), "concatenation");
- concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- pooling2dLayer0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
- pooling2dLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
-
- armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output");
- concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
- // Load graph into runtime
- armnn::NetworkId networkIdentifier;
- runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
- // now check the concat how many sub-tensors it is using..
- auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
- {
- if (subTensorHandle && subTensorHandle->GetParent())
- {
- return true;
- }
- return false;
- };
-
- unsigned int numberOfSubTensors = 0;
- for (auto&& layer : theGraph)
- {
- if(layer->GetType() == armnn::LayerType::Concat)
- {
- for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
- {
- const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
- if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
- {
- ++numberOfSubTensors;
- }
- }
- }
- }
- // sub-tensors should not be supported in this configuration
- ARMNN_ASSERT(numberOfSubTensors == 0);
-}
-
-TEST_CASE("SplitteronXorYPaddingRequiredTest")
-{
- using namespace armnn;
-
- unsigned int splitAxis = 2;
- unsigned int numSplit = 2;
-
- const TensorShape& inputShape = { 1, 1, 4, 4 };
- const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({ 1, 1, 2, 4 }, armnn::DataType::Float32);
- const std::vector<TensorShape> outputShapes{{ 1, 1, 2, 4 },
- { 1, 1, 2, 4 }};
-
- const float qScale = 1.0f;
- const int32_t qOffset = 0;
-
- // Creates structures for input & output.
- std::vector<float> inputData{
- 9.0f, 27.0f, 18.0f, 36.0f,
- 18.0f, 9.0f, 18.0f, 9.0f,
- 27.0f, 18.0f, 9.0f, 27.0f,
- 9.0f, 27.0f, 9.0f, 18.0f,
- };
-
- std::vector<float> expectedOutput0{
- 7.0f, 11.0f, 13.0f, 9.0f,
- 7.0f, 11.0f, 13.0f, 9.0f
- };
-
- std::vector<float> expectedOutput1{
- 9.0f, 11.0f, 12.0f, 7.0f,
- 9.0f, 11.0f, 12.0f, 7.0f
- };
-
- // Builds up the structure of the network.
- INetworkPtr net(INetwork::Create());
-
- TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32, qScale, qOffset);
-
- // Pooling
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
- descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
- descriptor.m_StrideX = descriptor.m_StrideY = 1;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
- // Splitter
- std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
-
- // Add current input shape to splitterDimSizes
- for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
- {
- splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
- }
-
- if (splitterDimSizes[splitAxis] % numSplit != 0)
- {
- throw ParseException("Number of splits must evenly divide the dimension");
- }
-
- splitterDimSizes[splitAxis] /= numSplit;
-
- SplitterDescriptor splitDesc(numSplit, inputShape.GetNumDimensions());
-
- for (unsigned int g = 0; g < numSplit; ++g)
- {
- // Set the size of the views.
- for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
- {
- splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
- }
- splitDesc.SetViewOriginCoord(g, splitAxis, splitterDimSizes[splitAxis] * g);
- }
-
- IConnectableLayer* input = net->AddInputLayer(0, "input");
- IConnectableLayer* pooling2d0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
- IConnectableLayer* pooling2d1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
- IConnectableLayer* splitter = net->AddSplitterLayer(splitDesc, "splitter");
-
- // Connections
- Connect(input, splitter, inputTensorInfo, 0, 0);
- Connect(splitter, pooling2d0, intermediateInfo, 0, 0);
- Connect(splitter, pooling2d1, intermediateInfo, 1, 0);
-
- std::vector<IConnectableLayer*> pooling2dLayers{pooling2d0, pooling2d1};
-
- for (unsigned int i = 0; i < outputShapes.size(); ++i)
- {
- TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
- Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
- }
-
- std::map<int, std::vector<float>> inputTensorData = {{ 0,inputData }};
- std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
- // Load graph into runtime
- armnn::NetworkId networkIdentifier;
- runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
- // now check the concat how many sub-tensors it is using..
- auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
- {
- if (subTensorHandle && subTensorHandle->GetParent())
- {
- return true;
- }
- return false;
- };
-
- for (auto&& layer : theGraph)
- {
- if(layer->GetType() == armnn::LayerType::Pooling2d)
- {
- unsigned int numberOfSubTensors = 0;
- for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
- {
- const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
- if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
- {
- ++numberOfSubTensors;
- }
- }
- // sub-tensors should be supported in this configuration
- ARMNN_ASSERT(numberOfSubTensors == 0);
- }
- }
-
- InputTensors inputTensors;
- inputTensors.reserve(inputTensorData.size());
- for (auto&& it : inputTensorData)
- {
- TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
- inputTensorInfo.SetConstant(true);
- inputTensors.push_back({it.first,
- ConstTensor(inputTensorInfo, it.second.data())});
- }
- OutputTensors outputTensors;
- outputTensors.reserve(expectedOutputData.size());
- std::map<int, std::vector<float>> outputStorage;
- for (auto&& it : expectedOutputData)
- {
- std::vector<float> out(it.second.size());
- outputStorage.emplace(it.first, out);
- outputTensors.push_back({it.first,
- Tensor(runtime->GetOutputTensorInfo(networkIdentifier, it.first),
- outputStorage.at(it.first).data())});
- }
-
- // Does the inference.
- runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-
- // Checks the results.
- float tolerance = 0.000001f;
- for (auto&& it : expectedOutputData)
- {
- std::vector<float> out = outputStorage.at(it.first);
- for (unsigned int i = 0; i < out.size(); ++i)
- {
- CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
- "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
-
- }
- }
-}
-
TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(