diff options
author | Jan Eilers <jan.eilers@arm.com> | 2020-04-02 13:56:54 +0100 |
---|---|---|
committer | Jan Eilers <jan.eilers@arm.com> | 2020-04-10 10:11:11 +0100 |
commit | bb446e576e120512d5752a5d6dc1ddc636f563ba (patch) | |
tree | 147d0b5f2886af208199a24704afd845a4825bf8 /src/armnn | |
parent | e5d0b93b152a26faf93538eb719d03e5b477d670 (diff) | |
download | armnn-bb446e576e120512d5752a5d6dc1ddc636f563ba.tar.gz |
IVGCVSW-4483 Remove boost::polymorphic_downcast
* replace boost::polymorphic_downcast with armnn::PolymorphicDowncast
* remove unnecessary includes of boost/polymorphic_cast.hpp
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
Diffstat (limited to 'src/armnn')
29 files changed, 93 insertions, 63 deletions
diff --git a/src/armnn/BackendSettings.hpp b/src/armnn/BackendSettings.hpp index 211af8b539..08a8921de4 100644 --- a/src/armnn/BackendSettings.hpp +++ b/src/armnn/BackendSettings.hpp @@ -5,10 +5,11 @@ #pragma once -#include <armnn/BackendId.hpp> - #include "DeviceSpec.hpp" +#include <armnn/BackendId.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> + #include <vector> namespace armnn @@ -84,7 +85,7 @@ private: m_PreferredBackends = preferredBackends; // Obtain list of supported backends - const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec); + const DeviceSpec& spec = *PolymorphicDowncast<const DeviceSpec*>(&deviceSpec); m_SupportedBackends = spec.GetSupportedBackends(); } diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp index 0b03a3861d..22029102c1 100644 --- a/src/armnn/DynamicQuantizationVisitor.cpp +++ b/src/armnn/DynamicQuantizationVisitor.cpp @@ -6,8 +6,9 @@ #include "DynamicQuantizationVisitor.hpp" #include "NetworkUtils.hpp" -#include <armnn/utility/IgnoreUnused.hpp> #include <armnn/Descriptors.hpp> +#include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <armnn/Types.hpp> #include <limits> @@ -52,7 +53,7 @@ void DynamicQuantizationVisitor::FinishVisit() for (const IConnectableLayer* layer : m_LayersToCalibrate) { std::vector<DebugLayer*> newDebugLayers = InsertDebugLayerAfter( - m_Graph, *boost::polymorphic_downcast<Layer*>(const_cast<IConnectableLayer*>(layer))); + m_Graph, *PolymorphicDowncast<Layer*>(const_cast<IConnectableLayer*>(layer))); // record them so we can take them out again efficiently afterward m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers)); } diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp index 78b08ecace..ea9930bf11 100644 --- a/src/armnn/Graph.cpp +++ b/src/armnn/Graph.cpp @@ -15,7 +15,6 @@ #include <armnn/Utils.hpp> #include 
<armnn/utility/Assert.hpp> -#include <boost/polymorphic_cast.hpp> #include <boost/format.hpp> #include <unordered_map> diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp index 00ab8deaa0..09461885ee 100644 --- a/src/armnn/Graph.hpp +++ b/src/armnn/Graph.hpp @@ -12,6 +12,7 @@ #include <armnn/NetworkFwd.hpp> #include <armnn/Exceptions.hpp> #include <armnn/utility/Assert.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <list> #include <map> @@ -32,7 +33,7 @@ public: template <typename LayerType> static LayerType* PtrCast(Layer* const layer) { - return boost::polymorphic_downcast<LayerType*>(layer); + return PolymorphicDowncast<LayerType*>(layer); } template <typename Func> diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp index ec35d71082..59475231a8 100644 --- a/src/armnn/Layer.hpp +++ b/src/armnn/Layer.hpp @@ -18,16 +18,16 @@ #include <armnn/Tensor.hpp> #include <armnn/INetwork.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <algorithm> +#include <functional> +#include <iostream> +#include <list> #include <memory> #include <string> #include <vector> -#include <iostream> -#include <functional> -#include <list> -#include <boost/numeric/conversion/cast.hpp> #include <boost/cast.hpp> namespace armnn @@ -145,12 +145,12 @@ public: int Connect(IInputSlot& destination) override { - return Connect(*boost::polymorphic_downcast<InputSlot*>(&destination)); + return Connect(*PolymorphicDowncast<InputSlot*>(&destination)); } void Disconnect(IInputSlot& slot) override { - return Disconnect(*boost::polymorphic_downcast<InputSlot*>(&slot)); + return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot)); } unsigned int CalculateIndexOnOwner() const override; diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 9da988b9e5..4b31fa3c8b 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -22,7 +22,6 @@ #include <LabelsAndEventClasses.hpp> -#include 
<boost/polymorphic_cast.hpp> #include <boost/format.hpp> namespace armnn diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index ac5159a855..c2bf27aa9b 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -24,6 +24,7 @@ #include <armnn/Logging.hpp> #include <armnn/utility/Assert.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <ProfilingService.hpp> @@ -53,12 +54,12 @@ armnn::INetworkPtr INetwork::Create() void INetwork::Destroy(INetwork* network) { - delete boost::polymorphic_downcast<Network*>(network); + delete PolymorphicDowncast<Network*>(network); } void IOptimizedNetwork::Destroy(IOptimizedNetwork* network) { - delete boost::polymorphic_downcast<OptimizedNetwork*>(network); + delete PolymorphicDowncast<OptimizedNetwork*>(network); } Status OptimizedNetwork::PrintGraph() @@ -149,7 +150,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string template <typename LayerT> LayerT* ConvertBf16ToFp32Weight(Layer* l) { - LayerT* layer = boost::polymorphic_downcast<LayerT*>(l); + LayerT* layer = PolymorphicDowncast<LayerT*>(l); if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected) && layer->m_Weight) { @@ -1015,12 +1016,12 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time."); } - const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork); + const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork); std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph()); auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy); - OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get()); + OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get()); // Get the 
optimized graph Graph& optGraph = optNetObjPtr->GetGraph(); diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp index d55fca68b5..3712c7be3a 100644 --- a/src/armnn/NetworkQuantizer.cpp +++ b/src/armnn/NetworkQuantizer.cpp @@ -21,6 +21,7 @@ #include <armnn/Types.hpp> #include <armnnUtils/TensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <boost/variant.hpp> @@ -44,12 +45,12 @@ INetworkQuantizerPtr INetworkQuantizer::Create(INetwork* inputNetwork, const Qua void INetworkQuantizer::Destroy(INetworkQuantizer *quantizer) { - delete boost::polymorphic_downcast<NetworkQuantizer*>(quantizer); + delete PolymorphicDowncast<NetworkQuantizer*>(quantizer); } void NetworkQuantizer::OverrideInputRange(LayerBindingId layerId, float min, float max) { - const Graph& graph = boost::polymorphic_downcast<const Network*>(m_InputNetwork)->GetGraph(); + const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph(); auto inputLayers = graph.GetInputLayers(); // Walk the input layers of the graph and override the quantization parameters of the one with the given id @@ -68,7 +69,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors) { m_RefineCount = 0; m_Ranges.SetDynamicMode(true); - const Graph& cGraph = boost::polymorphic_downcast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort(); + const Graph& cGraph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort(); // need to insert Debug layers in the DynamicQuantizationVisitor Graph& graph = const_cast<Graph&>(cGraph); @@ -135,7 +136,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors) INetworkPtr NetworkQuantizer::ExportNetwork() { - const Graph& graph = boost::polymorphic_downcast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort(); + const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort(); // Step 1) Walk the graph and populate default min/max 
values for // intermediate tensors, only if Runtime does not exist (created diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index 16e8a602f8..9c1ac17d70 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -4,9 +4,11 @@ // #include "Network.hpp" +#include "NetworkQuantizerUtils.hpp" #include "QuantizerVisitor.hpp" #include "StaticRangeVisitor.hpp" -#include "NetworkQuantizerUtils.hpp" + +#include <armnn/utility/PolymorphicDowncast.hpp> namespace armnn { @@ -28,7 +30,7 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++) { const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i); - const InputSlot* inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot); + const InputSlot* inputSlot = PolymorphicDowncast<const InputSlot*>(&srcInputSlot); ARMNN_ASSERT(inputSlot); const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); @@ -70,7 +72,7 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa { ARMNN_ASSERT(srcLayer); const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0); - auto inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot); + auto inputSlot = PolymorphicDowncast<const InputSlot*>(&srcInputSlot); ARMNN_ASSERT(inputSlot); const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp index f44606c762..32c7c39f8a 100644 --- a/src/armnn/Runtime.cpp +++ b/src/armnn/Runtime.cpp @@ -10,10 +10,10 @@ #include <armnn/backends/IBackendContext.hpp> #include <backendsCommon/DynamicBackendUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <iostream> -#include <boost/polymorphic_cast.hpp> #include <backends/BackendProfiling.hpp> using namespace armnn; @@ -34,7 +34,7 @@ IRuntimePtr IRuntime::Create(const CreationOptions& options) void IRuntime::Destroy(IRuntime* 
runtime) { - delete boost::polymorphic_downcast<Runtime*>(runtime); + delete PolymorphicDowncast<Runtime*>(runtime); } int Runtime::GenerateNetworkId() @@ -71,7 +71,7 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut, } unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork( - std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)), + std::unique_ptr<OptimizedNetwork>(PolymorphicDowncast<OptimizedNetwork*>(rawNetwork)), errorMessage, networkProperties, m_ProfilingService); diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp index 446485f415..d65c677e38 100644 --- a/src/armnn/SubgraphView.cpp +++ b/src/armnn/SubgraphView.cpp @@ -7,6 +7,7 @@ #include "Graph.hpp" #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <boost/numeric/conversion/cast.hpp> #include <utility> @@ -74,20 +75,20 @@ SubgraphView::SubgraphView(SubgraphView&& subgraph) SubgraphView::SubgraphView(IConnectableLayer* layer) : m_InputSlots{} , m_OutputSlots{} - , m_Layers{boost::polymorphic_downcast<Layer*>(layer)} + , m_Layers{PolymorphicDowncast<Layer*>(layer)} { unsigned int numInputSlots = layer->GetNumInputSlots(); m_InputSlots.resize(numInputSlots); for (unsigned int i = 0; i < numInputSlots; i++) { - m_InputSlots.at(i) = boost::polymorphic_downcast<InputSlot*>(&(layer->GetInputSlot(i))); + m_InputSlots.at(i) = PolymorphicDowncast<InputSlot*>(&(layer->GetInputSlot(i))); } unsigned int numOutputSlots = layer->GetNumOutputSlots(); m_OutputSlots.resize(numOutputSlots); for (unsigned int i = 0; i < numOutputSlots; i++) { - m_OutputSlots.at(i) = boost::polymorphic_downcast<OutputSlot*>(&(layer->GetOutputSlot(i))); + m_OutputSlots.at(i) = PolymorphicDowncast<OutputSlot*>(&(layer->GetOutputSlot(i))); } CheckSubgraph(); diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp index fa2fad9d4e..96e75abf8b 100644 --- 
a/src/armnn/SubgraphViewSelector.cpp +++ b/src/armnn/SubgraphViewSelector.cpp @@ -8,6 +8,7 @@ #include <armnn/utility/Assert.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <algorithm> #include <map> @@ -267,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos, for (auto inputSlot : layer.GetInputSlots()) { - auto connectedInput = boost::polymorphic_downcast<OutputSlot*>(inputSlot.GetConnection()); + auto connectedInput = PolymorphicDowncast<OutputSlot*>(inputSlot.GetConnection()); ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected."); Layer& inputLayer = connectedInput->GetOwningLayer(); diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 5df5ec8de5..b51303b7ee 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -6,6 +6,7 @@ #include "LayerCloneBase.hpp" #include <armnn/TypesUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/WorkloadData.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -118,7 +119,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory) if (inputLayer.GetType() == LayerType::Concat) { // Continue with the substitution if the connected inputs are also concat layers - m_ConcatLayers.push(boost::polymorphic_downcast<ConcatLayer*>(&inputLayer)); + m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer)); } ++i; } diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp index 4984cf26ee..f2057d48e9 100644 --- a/src/armnn/layers/PermuteLayer.hpp +++ b/src/armnn/layers/PermuteLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { @@ -45,7 +47,7 @@ public: bool IsInverse(const Layer& other) const { return (other.GetType() == LayerType::Permute) && - GetPermutation().IsInverse(boost::polymorphic_downcast<const 
PermuteLayer*>(&other)->GetPermutation()); + GetPermutation().IsInverse(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation()); } /// Indicates if the other layer received is equal to this one. @@ -54,7 +56,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Permute) && - GetPermutation().IsEqual(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation()); + GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation()); } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp index 4fd5c3e11d..5e0e883822 100644 --- a/src/armnn/layers/ReshapeLayer.hpp +++ b/src/armnn/layers/ReshapeLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { @@ -39,7 +41,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Reshape) && - m_Param.m_TargetShape == boost::polymorphic_downcast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape; + m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape; } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp index 4906bc9412..a668ce835e 100644 --- a/src/armnn/layers/TransposeLayer.hpp +++ b/src/armnn/layers/TransposeLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { @@ -43,7 +45,7 @@ public: bool IsInverse(const Layer& other) const { return (other.GetType() == LayerType::Transpose) && - GetPermutation().IsInverse(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation()); + GetPermutation().IsInverse(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation()); } /// Indicates if the other layer received is equal to this one. 
@@ -52,7 +54,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Transpose) && - GetPermutation().IsEqual(boost::polymorphic_downcast<const TransposeLayer*>(&other)->GetPermutation()); + GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation()); } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp index 222414c8c5..ca42cacb39 100644 --- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp +++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp @@ -7,6 +7,8 @@ #include "NetworkUtils.hpp" #include "Optimization.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { namespace optimizations @@ -15,7 +17,7 @@ namespace optimizations template <typename LayerT> inline LayerT* ConvertWeight(Layer* l) { - LayerT* layer = boost::polymorphic_downcast<LayerT*>(l); + LayerT* layer = PolymorphicDowncast<LayerT*>(l); if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected) && layer->m_Weight) { diff --git a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp index e598deb977..66fffbb280 100644 --- a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp +++ b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp @@ -7,6 +7,8 @@ #include "Optimization.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { namespace optimizations @@ -24,8 +26,8 @@ public: ARMNN_ASSERT(base.GetType() == LayerType::Pad); ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d); - PadLayer* padLayer = boost::polymorphic_downcast<PadLayer*>(&base); - Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast<Convolution2dLayer*>(&child); + PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(&base); + Convolution2dLayer* convolution2dLayer = 
PolymorphicDowncast<Convolution2dLayer*>(&child); OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot(); const TensorInfo& outInfo = child.GetOutputHandler().GetTensorInfo(); diff --git a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp index a7a477be84..15c6f61e97 100644 --- a/src/armnn/optimizations/MovePermuteUp.hpp +++ b/src/armnn/optimizations/MovePermuteUp.hpp @@ -6,6 +6,7 @@ #include "Optimization.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> #include <armnnUtils/Permute.hpp> namespace armnn @@ -29,7 +30,7 @@ public: if (CanMovePermuteToInputs(base)) { - auto permute = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer()); + auto permute = PolymorphicDowncast<PermuteLayer*>(&connection.GetOwningLayer()); const PermutationVector& perm = permute->GetPermutation(); // Inserts an equivalent permute before every input of the base layer. diff --git a/src/armnn/optimizations/MoveTransposeUp.hpp b/src/armnn/optimizations/MoveTransposeUp.hpp index 66543069c8..86c018868e 100644 --- a/src/armnn/optimizations/MoveTransposeUp.hpp +++ b/src/armnn/optimizations/MoveTransposeUp.hpp @@ -6,6 +6,7 @@ #include "Optimization.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> #include <armnnUtils/Transpose.hpp> namespace armnn @@ -29,7 +30,7 @@ public: if (CanMoveTransposeToInputs(base)) { - auto transpose = boost::polymorphic_downcast<TransposeLayer*>(&connection.GetOwningLayer()); + auto transpose = PolymorphicDowncast<TransposeLayer*>(&connection.GetOwningLayer()); const PermutationVector& perm = transpose->GetPermutation(); // Inserts an equivalent transpose before every input of the base layer. 
diff --git a/src/armnn/optimizations/Optimization.hpp b/src/armnn/optimizations/Optimization.hpp index efe3930db8..565f543bee 100644 --- a/src/armnn/optimizations/Optimization.hpp +++ b/src/armnn/optimizations/Optimization.hpp @@ -7,6 +7,8 @@ #include "Graph.hpp" #include "LayersFwd.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + namespace armnn { @@ -37,7 +39,7 @@ public: { if (base.GetType() == LayerEnumOf<BaseType>()) { - Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(&base)); + Wrapped::Run(graph, *PolymorphicDowncast<BaseType*>(&base)); } } diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp index 98e87c36c6..fe0b312ce0 100644 --- a/src/armnn/optimizations/OptimizeInversePermutes.hpp +++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp @@ -7,6 +7,7 @@ #include "Optimization.hpp" #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> namespace armnn { @@ -23,9 +24,9 @@ public: { IgnoreUnused(graph); Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer(); - auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer()); + auto child = PolymorphicDowncast<PermuteType*>(&connection.GetOwningLayer()); - if (child->IsInverse(*boost::polymorphic_downcast<PermuteType*>(&base))) + if (child->IsInverse(*PolymorphicDowncast<PermuteType*>(&base))) { // Bypass both layers. Child will be removed as it's left unconnected. // Base layer will be removed if left unconnected. 
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp index bac27c06a7..d836a9c549 100644 --- a/src/armnn/optimizations/SquashEqualSiblings.hpp +++ b/src/armnn/optimizations/SquashEqualSiblings.hpp @@ -7,6 +7,7 @@ #include "Optimization.hpp" #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> namespace armnn { @@ -32,7 +33,7 @@ public: if (baseOutput.GetNumConnections() > 1) { - auto& comparableChild = *boost::polymorphic_downcast<Comparable*>(&child); + auto& comparableChild = *PolymorphicDowncast<Comparable*>(&child); Layer* lowestPriorityChild = &child; for (auto&& it : baseOutput.GetConnections()) diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index 72ad9d45ef..b6ffd216e0 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -12,6 +12,7 @@ #include <armnnUtils/DataLayoutIndexed.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/WorkloadData.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -34,7 +35,7 @@ template<typename Workload> std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, const IWorkloadFactory& factory) { std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory); - BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()), + BOOST_TEST(workload.get() == PolymorphicDowncast<Workload*>(workload.get()), "Cannot convert to derived class"); std::string reasonIfUnsupported; layer.SetBackendId(factory.GetBackendId()); diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp index 30e5c879ee..8e6af313b7 100644 --- a/src/armnn/test/GraphTests.cpp +++ b/src/armnn/test/GraphTests.cpp @@ -9,6 +9,7 @@ #include <armnn/TypesUtils.hpp> #include <armnn/Exceptions.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <armnn/backends/IBackendInternal.hpp> 
@@ -274,7 +275,7 @@ static std::vector<Edge> GetEdgeList(const armnn::Graph& graph) const unsigned int numConnections = outputSlot.GetNumConnections(); for (unsigned int c = 0; c < numConnections; ++c) { - auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c)); + auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c)); edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer()); } } diff --git a/src/armnn/test/GraphUtils.cpp b/src/armnn/test/GraphUtils.cpp index 1f9bb44d3d..36db900a2d 100644 --- a/src/armnn/test/GraphUtils.cpp +++ b/src/armnn/test/GraphUtils.cpp @@ -5,6 +5,8 @@ #include "GraphUtils.hpp" +#include <armnn/utility/PolymorphicDowncast.hpp> + bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name) { for (auto&& layer : graph) @@ -52,7 +54,7 @@ bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, const unsigned int numConnections = outputSlot.GetNumConnections(); for (unsigned int c = 0; c < numConnections; ++c) { - auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c)); + auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c)); if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() && inputSlot->GetSlotIndex() == destSlot) { diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index c7883ffdb8..ca85e11021 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -15,6 +15,7 @@ #include <armnn/LayerVisitorBase.hpp> #include <armnnUtils/FloatingPointConverter.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <backendsCommon/IBackendInternal.hpp> @@ -695,7 +696,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(id, name); - auto inputLayer = boost::polymorphic_downcast<const 
InputLayer*>(layer); + auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer); BOOST_TEST((inputLayer->GetBackendId() == "MockBackend")); } @@ -704,7 +705,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(id, name); - auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer); + auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer); BOOST_TEST((outputLayer->GetBackendId() == "MockBackend")); } @@ -713,7 +714,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(activationDescriptor, name); - auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer); + auto activation = PolymorphicDowncast<const ActivationLayer*>(layer); BOOST_TEST((activation->GetBackendId() == "CustomBackend")); } }; @@ -765,7 +766,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy); - OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get()); + OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get()); // Get the optimized graph Graph& optGraph = optNetObjPtr->GetGraph(); diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index ebdfbc5a40..669703ca54 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -15,6 +15,7 @@ #include <armnn/Tensor.hpp> #include <armnn/Types.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <armnnQuantizer/INetworkQuantizer.hpp> #include <QuantizeHelper.hpp> @@ -190,7 +191,7 @@ private: void VisitLayersTopologically(const INetwork* inputNetwork, ILayerVisitor& visitor) { - auto network = boost::polymorphic_downcast<const Network*>(inputNetwork); + auto network = PolymorphicDowncast<const Network*>(inputNetwork); auto graph = 
network->GetGraph().TopologicalSort(); VisitLayers(graph, visitor); @@ -346,7 +347,7 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant) { INetworkPtr network = CreateNetworkWithInputOutputLayers(); - armnn::TensorInfo tensorInfo = GetInputTensorInfo(boost::polymorphic_downcast<const Network*>(network.get())); + armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get())); // Outliers -56 and 98 std::vector<float> inputData({0, 0, 0, -56, 98, 0, 0, 0}); @@ -3033,12 +3034,12 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); - TestConnectionPreservation visitor1(boost::polymorphic_downcast<const Network*>(network.get())->GetGraph()); + TestConnectionPreservation visitor1(PolymorphicDowncast<const Network*>(network.get())->GetGraph()); VisitLayersTopologically(network.get(), visitor1); armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network.get()); - armnn::TensorInfo tensorInfo = GetInputTensorInfo(boost::polymorphic_downcast<const Network*>(network.get())); + armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get())); std::vector<float> inputData({0, 2, 0, 4}); armnn::ConstTensor inputTensor(tensorInfo, inputData.data()); @@ -3049,7 +3050,7 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) INetworkPtr quantNetwork = quantizer->ExportNetwork(); - TestConnectionPreservation visitor2(boost::polymorphic_downcast<const Network*>(quantNetwork.get())->GetGraph()); + TestConnectionPreservation visitor2(PolymorphicDowncast<const Network*>(quantNetwork.get())->GetGraph()); VisitLayersTopologically(quantNetwork.get(), visitor2); } diff --git a/src/armnn/test/UtilityTests.cpp b/src/armnn/test/UtilityTests.cpp index 
7be5c9518a..d5779c1a76 100644 --- a/src/armnn/test/UtilityTests.cpp +++ b/src/armnn/test/UtilityTests.cpp @@ -45,12 +45,12 @@ BOOST_AUTO_TEST_CASE(PolymorphicDowncast) Base* base1 = &child1; auto ptr1 = dynamic_cast<Child1*>(base1); BOOST_CHECK(ptr1 != nullptr); - BOOST_CHECK_NO_THROW(polymorphic_downcast<Child1*>(base1)); - BOOST_CHECK(polymorphic_downcast<Child1*>(base1) == ptr1); + BOOST_CHECK_NO_THROW(armnn::PolymorphicDowncast<Child1*>(base1)); + BOOST_CHECK(armnn::PolymorphicDowncast<Child1*>(base1) == ptr1); auto ptr2 = dynamic_cast<Child2*>(base1); BOOST_CHECK(ptr2 == nullptr); - BOOST_CHECK_THROW(polymorphic_downcast<Child2*>(base1), std::bad_cast); + BOOST_CHECK_THROW(armnn::PolymorphicDowncast<Child2*>(base1), std::bad_cast); armnn::IgnoreUnused(ptr1, ptr2); } |