diff options
author | Finn Williams <Finn.Williams@arm.com> | 2020-10-22 16:53:35 +0100 |
---|---|---|
committer | Finn Williams <Finn.Williams@arm.com> | 2020-10-28 14:37:30 +0000 |
commit | 3e54d03115bc502aa85a346d68f50712ece10620 (patch) | |
tree | 40af2c15da48c33a16ddd4f45c01ff12d2577a88 /src/backends/backendsCommon/WorkloadFactory.cpp | |
parent | aaa54ae75100ddb5520698034f76d341bb036c99 (diff) | |
download | armnn-3e54d03115bc502aa85a346d68f50712ece10620.tar.gz |
IVGCVSW-5433 Remove boost::transform_iterator and make_transform_iterator
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I28aace7092cff5743353df1b1de8e7a4691554d3
Diffstat (limited to 'src/backends/backendsCommon/WorkloadFactory.cpp')
-rw-r--r-- | src/backends/backendsCommon/WorkloadFactory.cpp | 43 |
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 54a4157fe3..5e3eed086a 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -11,14 +11,13 @@ #include <armnn/ILayerSupport.hpp> #include <armnn/BackendRegistry.hpp> #include <armnn/utility/PolymorphicDowncast.hpp> +#include <armnn/utility/TransformIterator.hpp> #include <backendsCommon/WorkloadFactory.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <backendsCommon/test/WorkloadTestUtils.hpp> -#include <boost/iterator/transform_iterator.hpp> - #include <sstream> namespace armnn @@ -26,6 +25,8 @@ namespace armnn namespace { +using LayerList = std::list<Layer*>; +using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally. const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type) { @@ -667,16 +668,18 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType); }; - auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo); - auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo); + + auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo); + auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo); std::vector<TensorInfo> inputs(beginI, endI); auto getTensorInfoPtr = [](const TensorInfo& info) { return &info; }; - auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr); - auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr); + + auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); + auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr); const 
TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -1011,8 +1014,8 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { return OverrideDataType(slot.GetTensorInfo(), dataType); }; - auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo); - auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo); + auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo); + auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo); std::vector<TensorInfo> outputs(beginI, endI); const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end()); @@ -1032,16 +1035,16 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType); }; - auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo); - auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo); + auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo); + auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo); std::vector<TensorInfo> inputs(beginI, endI); auto getTensorInfoPtr = [](const TensorInfo& info) { return &info; }; - auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr); - auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr); + auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); + auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -1063,12 +1066,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { return OverrideDataType(slot.GetTensorInfo(), dataType); }; - auto 
beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn); - auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn); + auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn); + auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn); std::vector<TensorInfo> inputs(beginI, endI); - auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut); - auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut); + auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut); + auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut); std::vector<TensorInfo> outputs(beginO, endO); @@ -1076,12 +1079,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { return &info; }; - auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr); - auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr); + auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); + auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI); - auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr); - auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr); + auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr); + auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr); std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO); |