author     Finn Williams <Finn.Williams@arm.com>    2020-10-22 16:53:35 +0100
committer  Finn Williams <Finn.Williams@arm.com>    2020-10-28 14:37:30 +0000
commit     3e54d03115bc502aa85a346d68f50712ece10620 (patch)
tree       40af2c15da48c33a16ddd4f45c01ff12d2577a88 /src
parent     aaa54ae75100ddb5520698034f76d341bb036c99 (diff)
IVGCVSW-5433 Remove boost::transform_iterator and make_transform_iterator
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I28aace7092cff5743353df1b1de8e7a4691554d3
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/Graph.hpp                                9
-rw-r--r--  src/armnnUtils/test/TransformIteratorTest.cpp    102
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp   43
3 files changed, 129 insertions, 25 deletions
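
The new helper is used as a drop-in replacement for boost::make_transform_iterator: MakeTransformIterator wraps an iterator with a functor that is applied on dereference, so the underlying container is never modified. Below is a minimal standalone sketch of that usage, assuming only what the diff itself shows (armnn/utility/TransformIterator.hpp providing MakeTransformIterator in the armnn namespace, mirroring the new test).

#include <armnn/utility/TransformIterator.hpp>

#include <iostream>
#include <vector>

// Functor applied on dereference; mirrors the square() helper in the new test.
static int square(const int val)
{
    return val * val;
}

int main()
{
    const std::vector<int> values{1, 2, 3, 4, 5};

    // Previously: boost::make_transform_iterator(values.begin(), &square)
    auto begin = armnn::MakeTransformIterator(values.begin(), &square);
    auto end   = armnn::MakeTransformIterator(values.end(), &square);

    // Materialise the transformed range; 'values' itself is left unchanged.
    const std::vector<int> squared(begin, end); // {1, 4, 9, 16, 25}

    for (const int val : squared)
    {
        std::cout << val << " ";
    }
    std::cout << std::endl;
    return 0;
}
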
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 87e0da826f..731ae1e5aa 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -13,6 +13,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnn/utility/TransformIterator.hpp>
#include <list>
#include <map>
@@ -20,8 +21,6 @@
#include <unordered_set>
#include <vector>
-#include <boost/iterator/transform_iterator.hpp>
-
namespace armnn
{
@@ -51,9 +50,9 @@ public:
using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
using IteratorDifference = Iterator::difference_type;
- using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>;
- using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>;
- using ConstIteratorOutputs = boost::transform_iterator<decltype(&PtrCast<const OutputLayer>), Iterator>;
+ using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>;
+ using ConstIteratorInputs = TransformIterator<decltype(&PtrCast<const InputLayer>), Iterator>;
+ using ConstIteratorOutputs = TransformIterator<decltype(&PtrCast<const OutputLayer>), Iterator>;
/// Wrapper class returned by Graph::GetInputLayers()
struct InputLayersAccessor
diff --git a/src/armnnUtils/test/TransformIteratorTest.cpp b/src/armnnUtils/test/TransformIteratorTest.cpp
new file mode 100644
index 0000000000..c44e454312
--- /dev/null
+++ b/src/armnnUtils/test/TransformIteratorTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/utility/TransformIterator.hpp>
+
+#include <boost/test/unit_test.hpp>
+#include <iostream>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(TransformIteratorSuite)
+
+namespace
+{
+
+static int square(const int val)
+{
+ return val * val;
+}
+
+static std::string concat(const std::string val)
+{
+ return val + "a";
+}
+
+BOOST_AUTO_TEST_CASE(TransformIteratorTest)
+{
+ struct WrapperTestClass
+ {
+ TransformIterator<decltype(&square), std::vector<int>::const_iterator> begin() const
+ {
+ return { m_Vec.begin(), &square };
+ }
+
+ TransformIterator<decltype(&square), std::vector<int>::const_iterator> end() const
+ {
+ return { m_Vec.end(), &square };
+ }
+
+ const std::vector<int> m_Vec{1, 2, 3, 4, 5};
+ };
+
+ struct WrapperStringClass
+ {
+ TransformIterator<decltype(&concat), std::vector<std::string>::const_iterator> begin() const
+ {
+ return { m_Vec.begin(), &concat };
+ }
+
+ TransformIterator<decltype(&concat), std::vector<std::string>::const_iterator> end() const
+ {
+ return { m_Vec.end(), &concat };
+ }
+
+ const std::vector<std::string> m_Vec{"a", "b", "c"};
+ };
+
+ WrapperStringClass wrapperStringClass;
+ WrapperTestClass wrapperTestClass;
+ int i = 1;
+
+ for(auto val : wrapperStringClass)
+ {
+ BOOST_CHECK(val != "e");
+ i++;
+ }
+
+ i = 1;
+ for(auto val : wrapperTestClass)
+ {
+ BOOST_CHECK(val == square(i));
+ i++;
+ }
+
+ i = 1;
+ // Check original vector is unchanged
+ for(auto val : wrapperTestClass.m_Vec)
+ {
+ BOOST_CHECK(val == i);
+ i++;
+ }
+
+ std::vector<int> originalVec{1, 2, 3, 4, 5};
+
+ auto transformBegin = MakeTransformIterator(originalVec.begin(), &square);
+ auto transformEnd = MakeTransformIterator(originalVec.end(), &square);
+
+ std::vector<int> transformedVec(transformBegin, transformEnd);
+
+ i = 1;
+ for(auto val : transformedVec)
+ {
+ BOOST_CHECK(val == square(i));
+ i++;
+ }
+}
+
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 54a4157fe3..5e3eed086a 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -11,14 +11,13 @@
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnn/utility/TransformIterator.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
-#include <boost/iterator/transform_iterator.hpp>
-
#include <sstream>
namespace armnn
@@ -26,6 +25,8 @@ namespace armnn
namespace
{
+using LayerList = std::list<Layer*>;
+using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
@@ -667,16 +668,18 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
{
return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
};
- auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
- auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+
+ auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
+ auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
std::vector<TensorInfo> inputs(beginI, endI);
auto getTensorInfoPtr = [](const TensorInfo& info)
{
return &info;
};
- auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
- auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+
+ auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -1011,8 +1014,8 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
{
return OverrideDataType(slot.GetTensorInfo(), dataType);
};
- auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
- auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
+ auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
+ auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
std::vector<TensorInfo> outputs(beginI, endI);
const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
@@ -1032,16 +1035,16 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
{
return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
};
- auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
- auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+ auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
+ auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
std::vector<TensorInfo> inputs(beginI, endI);
auto getTensorInfoPtr = [](const TensorInfo& info)
{
return &info;
};
- auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
- auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -1063,12 +1066,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
{
return OverrideDataType(slot.GetTensorInfo(), dataType);
};
- auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
- auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
+ auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
+ auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
std::vector<TensorInfo> inputs(beginI, endI);
- auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
- auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
+ auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
+ auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
std::vector<TensorInfo> outputs(beginO, endO);
@@ -1076,12 +1079,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
{
return &info;
};
- auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
- auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
- auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
- auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
+ auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
+ auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);