From bb446e576e120512d5752a5d6dc1ddc636f563ba Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Thu, 2 Apr 2020 13:56:54 +0100
Subject: IVGCVSW-4483 Remove boost::polymorphic_downcast

* exchange boost::polymorphic_downcast with armnn::PolymorphicDowncast
* remove unnecessary includes of boost::polymorphic_downcast

Signed-off-by: Jan Eilers
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
---
 include/armnn/utility/PolymorphicDowncast.hpp      |   4 +-
 src/armnn/BackendSettings.hpp                      |   7 +-
 src/armnn/DynamicQuantizationVisitor.cpp           |   5 +-
 src/armnn/Graph.cpp                                |   1 -
 src/armnn/Graph.hpp                                |   3 +-
 src/armnn/Layer.hpp                                |  12 +--
 src/armnn/LoadedNetwork.cpp                        |   1 -
 src/armnn/Network.cpp                              |  11 +-
 src/armnn/NetworkQuantizer.cpp                     |   9 +-
 src/armnn/QuantizerVisitor.cpp                     |   8 +-
 src/armnn/Runtime.cpp                              |   6 +-
 src/armnn/SubgraphView.cpp                         |   7 +-
 src/armnn/SubgraphViewSelector.cpp                 |   3 +-
 src/armnn/layers/ConcatLayer.cpp                   |   3 +-
 src/armnn/layers/PermuteLayer.hpp                  |   6 +-
 src/armnn/layers/ReshapeLayer.hpp                  |   4 +-
 src/armnn/layers/TransposeLayer.hpp                |   6 +-
 .../optimizations/ConvertFp32NetworkToBf16.hpp     |   4 +-
 .../optimizations/FoldPadIntoConvolution2d.hpp     |   6 +-
 src/armnn/optimizations/MovePermuteUp.hpp          |   3 +-
 src/armnn/optimizations/MoveTransposeUp.hpp        |   3 +-
 src/armnn/optimizations/Optimization.hpp           |   4 +-
 .../optimizations/OptimizeInversePermutes.hpp      |   5 +-
 src/armnn/optimizations/SquashEqualSiblings.hpp    |   3 +-
 src/armnn/test/CreateWorkload.hpp                  |   3 +-
 src/armnn/test/GraphTests.cpp                      |   3 +-
 src/armnn/test/GraphUtils.cpp                      |   4 +-
 src/armnn/test/OptimizerTests.cpp                  |   9 +-
 src/armnn/test/QuantizerTest.cpp                   |  11 +-
 src/armnn/test/UtilityTests.cpp                    |   6 +-
 src/armnnDeserializer/Deserializer.cpp             |   3 -
 .../test/DetectionPostProcess.cpp                  |  10 +-
 src/armnnTfLiteParser/test/Unsupported.cpp         |   4 +-
 src/armnnTfParser/TfParser.cpp                     |  47 +++++----
 src/armnnTfParser/test/Assert.cpp                  |   6 +-
 src/backends/aclCommon/BaseMemoryManager.cpp       |   1 -
 .../aclCommon/test/CreateWorkloadClNeon.hpp        |   9 +-
 src/backends/backendsCommon/MemCopyWorkload.cpp    |   6 +-
 src/backends/backendsCommon/WorkloadFactory.cpp    |  80 +++++++--------
 src/backends/backendsCommon/WorkloadUtils.cpp      |   2 +
 src/backends/backendsCommon/WorkloadUtils.hpp      |   8 +-
 .../backendsCommon/test/DynamicBackendTests.hpp    |  25 ++---
 .../backendsCommon/test/OptimizationViewsTests.cpp |  16 +-
 src/backends/cl/ClBackendContext.cpp               |   5 +-
 src/backends/cl/ClTensorHandleFactory.cpp          |   6 +-
 src/backends/cl/ClWorkloadFactory.cpp              |   4 +-
 src/backends/cl/test/ClCreateWorkloadTests.cpp     | 114 ++++++++++-----
 src/backends/cl/workloads/ClAbsWorkload.cpp        |   6 +-
 src/backends/cl/workloads/ClNegWorkload.cpp        |   5 +-
 src/backends/cl/workloads/ClRsqrtWorkload.cpp      |   5 +-
 src/backends/cl/workloads/ClSliceWorkload.cpp      |   5 +-
 .../cl/workloads/ClSpaceToDepthWorkload.cpp        |   1 -
 src/backends/neon/NeonTensorHandleFactory.cpp      |   3 +-
 src/backends/neon/NeonWorkloadFactory.cpp          |   3 +-
 src/backends/neon/test/NeonCreateWorkloadTests.cpp |  95 +++++++++--------
 src/backends/neon/workloads/NeonAbsWorkload.cpp    |   7 +-
 .../neon/workloads/NeonActivationWorkload.cpp      |   6 +-
 .../neon/workloads/NeonAdditionWorkload.cpp        |   7 +-
 .../neon/workloads/NeonArgMinMaxWorkload.cpp       |   5 +-
 .../workloads/NeonBatchNormalizationWorkload.cpp   |   7 +-
 .../neon/workloads/NeonConstantWorkload.cpp        |   5 +-
 .../neon/workloads/NeonConvolution2dWorkload.cpp   |   7 +-
 .../neon/workloads/NeonDequantizeWorkload.cpp      |   5 +-
 .../workloads/NeonDetectionPostProcessWorkload.cpp |   5 +-
 .../neon/workloads/NeonDivisionWorkload.cpp        |   8 +-
 .../neon/workloads/NeonFloorFloatWorkload.cpp      |   8 +-
 .../neon/workloads/NeonFullyConnectedWorkload.cpp  |   5 +-
 .../workloads/NeonL2NormalizationFloatWorkload.cpp |   5 +-
 .../neon/workloads/NeonMaximumWorkload.cpp         |   7 +-
 .../neon/workloads/NeonMinimumWorkload.cpp         |   8 +-
 .../neon/workloads/NeonMultiplicationWorkload.cpp  |   8 +-
 src/backends/neon/workloads/NeonNegWorkload.cpp    |   7 +-
 .../workloads/NeonNormalizationFloatWorkload.cpp   |   5 +-
 .../neon/workloads/NeonPooling2dWorkload.cpp       |   6 +-
 src/backends/neon/workloads/NeonPreluWorkload.cpp  |   8 +-
 .../neon/workloads/NeonReshapeWorkload.cpp         |   8 +-
 src/backends/neon/workloads/NeonResizeWorkload.cpp |   6 +-
 src/backends/neon/workloads/NeonRsqrtWorkload.cpp  |   6 +-
 src/backends/neon/workloads/NeonSliceWorkload.cpp  |   6 +-
 .../neon/workloads/NeonSoftmaxFloatWorkload.cpp    |   6 +-
 .../neon/workloads/NeonSoftmaxUint8Workload.cpp    |   5 +-
 .../neon/workloads/NeonSpaceToDepthWorkload.cpp    |   6 +-
 .../neon/workloads/NeonSplitterWorkload.cpp        |   3 +-
 .../neon/workloads/NeonStridedSliceWorkload.cpp    |   5 +-
 .../neon/workloads/NeonSubtractionWorkload.cpp     |   7 +-
 .../NeonTransposeConvolution2dWorkload.cpp         |   5 +-
 .../reference/test/RefCreateWorkloadTests.cpp      |  27 ++---
 .../reference/workloads/RefWorkloadUtils.hpp       |   4 +-
 src/profiling/test/ProfilingTests.hpp              |   4 +-
 89 files changed, 461 insertions(+), 395 deletions(-)

diff --git a/include/armnn/utility/PolymorphicDowncast.hpp b/include/armnn/utility/PolymorphicDowncast.hpp
index d529867474..b4a5cad314 100644
--- a/include/armnn/utility/PolymorphicDowncast.hpp
+++ b/include/armnn/utility/PolymorphicDowncast.hpp
@@ -30,11 +30,11 @@ namespace armnn
 
 template<typename DestType, typename SourceType>
-DestType polymorphic_downcast(SourceType value)
+DestType PolymorphicDowncast(SourceType value)
 {
     static_assert(std::is_pointer<SourceType>::value &&
                   std::is_pointer<DestType>::value,
-                  "polymorphic_downcast only works with pointer types.");
+                  "PolymorphicDowncast only works with pointer types.");
 
     ARMNN_POLYMORPHIC_CAST_CHECK(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
     return static_cast<DestType>(value);

diff --git a/src/armnn/BackendSettings.hpp b/src/armnn/BackendSettings.hpp
index 211af8b539..08a8921de4 100644
--- a/src/armnn/BackendSettings.hpp
+++ b/src/armnn/BackendSettings.hpp
@@ -5,10 +5,11 @@
 
 #pragma once
 
-#include <armnn/BackendId.hpp>
-
 #include "DeviceSpec.hpp"
 
+#include <armnn/BackendId.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <vector>
 
 namespace armnn
@@ -84,7 +85,7 @@ private:
         m_PreferredBackends = preferredBackends;
 
         // Obtain list of supported backends
-        const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
+        const DeviceSpec& spec = *PolymorphicDowncast<const DeviceSpec*>(&deviceSpec);
         m_SupportedBackends = spec.GetSupportedBackends();
     }

diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 0b03a3861d..22029102c1 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -6,8 +6,9 @@
 #include "DynamicQuantizationVisitor.hpp"
 #include "NetworkUtils.hpp"
 
-#include
 #include
+#include
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include
 #include
@@ -52,7 +53,7 @@ void DynamicQuantizationVisitor::FinishVisit()
     for (const IConnectableLayer* layer : m_LayersToCalibrate)
     {
         std::vector<DebugLayer*> newDebugLayers = InsertDebugLayerAfter(
-            m_Graph, *boost::polymorphic_downcast<Layer*>(const_cast<IConnectableLayer*>(layer)));
+            m_Graph, *PolymorphicDowncast<Layer*>(const_cast<IConnectableLayer*>(layer)));
         // record them so we can take them out again efficiently afterward
         m_DebugLayers.insert(std::end(m_DebugLayers), std::begin(newDebugLayers), std::end(newDebugLayers));
     }

diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 78b08ecace..ea9930bf11 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -15,7 +15,6 @@
#include #include -#include #include #include diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp index 00ab8deaa0..09461885ee 100644 --- a/src/armnn/Graph.hpp +++ b/src/armnn/Graph.hpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -32,7 +33,7 @@ public: template static LayerType* PtrCast(Layer* const layer) { - return boost::polymorphic_downcast(layer); + return PolymorphicDowncast(layer); } template diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp index ec35d71082..59475231a8 100644 --- a/src/armnn/Layer.hpp +++ b/src/armnn/Layer.hpp @@ -18,16 +18,16 @@ #include #include #include +#include #include +#include +#include +#include #include #include #include -#include -#include -#include -#include #include namespace armnn @@ -145,12 +145,12 @@ public: int Connect(IInputSlot& destination) override { - return Connect(*boost::polymorphic_downcast(&destination)); + return Connect(*PolymorphicDowncast(&destination)); } void Disconnect(IInputSlot& slot) override { - return Disconnect(*boost::polymorphic_downcast(&slot)); + return Disconnect(*PolymorphicDowncast(&slot)); } unsigned int CalculateIndexOnOwner() const override; diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 9da988b9e5..4b31fa3c8b 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -22,7 +22,6 @@ #include -#include #include namespace armnn diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index ac5159a855..c2bf27aa9b 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -53,12 +54,12 @@ armnn::INetworkPtr INetwork::Create() void INetwork::Destroy(INetwork* network) { - delete boost::polymorphic_downcast(network); + delete PolymorphicDowncast(network); } void IOptimizedNetwork::Destroy(IOptimizedNetwork* network) { - delete boost::polymorphic_downcast(network); + delete PolymorphicDowncast(network); } Status OptimizedNetwork::PrintGraph() @@ -149,7 +150,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional LayerT* ConvertBf16ToFp32Weight(Layer* l) { - LayerT* layer = boost::polymorphic_downcast(l); + LayerT* layer = PolymorphicDowncast(l); if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected) && layer->m_Weight) { @@ -1015,12 +1016,12 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time."); } - const Network& network = *boost::polymorphic_downcast(&inNetwork); + const Network& network = *PolymorphicDowncast(&inNetwork); std::unique_ptr graph = std::make_unique(network.GetGraph()); auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy); - OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast(optNet.get()); + OptimizedNetwork* optNetObjPtr = PolymorphicDowncast(optNet.get()); // Get the optimized graph Graph& optGraph = optNetObjPtr->GetGraph(); diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp index d55fca68b5..3712c7be3a 100644 --- a/src/armnn/NetworkQuantizer.cpp +++ b/src/armnn/NetworkQuantizer.cpp @@ -21,6 +21,7 @@ #include #include +#include #include @@ -44,12 +45,12 @@ INetworkQuantizerPtr INetworkQuantizer::Create(INetwork* inputNetwork, const Qua void INetworkQuantizer::Destroy(INetworkQuantizer *quantizer) { - delete boost::polymorphic_downcast(quantizer); + delete PolymorphicDowncast(quantizer); } void 
NetworkQuantizer::OverrideInputRange(LayerBindingId layerId, float min, float max) { - const Graph& graph = boost::polymorphic_downcast(m_InputNetwork)->GetGraph(); + const Graph& graph = PolymorphicDowncast(m_InputNetwork)->GetGraph(); auto inputLayers = graph.GetInputLayers(); // Walk the input layers of the graph and override the quantization parameters of the one with the given id @@ -68,7 +69,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors) { m_RefineCount = 0; m_Ranges.SetDynamicMode(true); - const Graph& cGraph = boost::polymorphic_downcast(m_InputNetwork)->GetGraph().TopologicalSort(); + const Graph& cGraph = PolymorphicDowncast(m_InputNetwork)->GetGraph().TopologicalSort(); // need to insert Debug layers in the DynamicQuantizationVisitor Graph& graph = const_cast(cGraph); @@ -135,7 +136,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors) INetworkPtr NetworkQuantizer::ExportNetwork() { - const Graph& graph = boost::polymorphic_downcast(m_InputNetwork)->GetGraph().TopologicalSort(); + const Graph& graph = PolymorphicDowncast(m_InputNetwork)->GetGraph().TopologicalSort(); // Step 1) Walk the graph and populate default min/max values for // intermediate tensors, only if Runtime does not exist (created diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index 16e8a602f8..9c1ac17d70 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -4,9 +4,11 @@ // #include "Network.hpp" +#include "NetworkQuantizerUtils.hpp" #include "QuantizerVisitor.hpp" #include "StaticRangeVisitor.hpp" -#include "NetworkQuantizerUtils.hpp" + +#include namespace armnn { @@ -28,7 +30,7 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++) { const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i); - const InputSlot* inputSlot = boost::polymorphic_downcast(&srcInputSlot); + const InputSlot* inputSlot = PolymorphicDowncast(&srcInputSlot); ARMNN_ASSERT(inputSlot); const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); @@ -70,7 +72,7 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa { ARMNN_ASSERT(srcLayer); const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0); - auto inputSlot = boost::polymorphic_downcast(&srcInputSlot); + auto inputSlot = PolymorphicDowncast(&srcInputSlot); ARMNN_ASSERT(inputSlot); const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot(); diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp index f44606c762..32c7c39f8a 100644 --- a/src/armnn/Runtime.cpp +++ b/src/armnn/Runtime.cpp @@ -10,10 +10,10 @@ #include #include +#include #include -#include #include using namespace armnn; @@ -34,7 +34,7 @@ IRuntimePtr IRuntime::Create(const CreationOptions& options) void IRuntime::Destroy(IRuntime* runtime) { - delete boost::polymorphic_downcast(runtime); + delete PolymorphicDowncast(runtime); } int Runtime::GenerateNetworkId() @@ -71,7 +71,7 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut, } unique_ptr loadedNetwork = LoadedNetwork::MakeLoadedNetwork( - std::unique_ptr(boost::polymorphic_downcast(rawNetwork)), + std::unique_ptr(PolymorphicDowncast(rawNetwork)), errorMessage, networkProperties, m_ProfilingService); diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp index 446485f415..d65c677e38 100644 --- a/src/armnn/SubgraphView.cpp +++ b/src/armnn/SubgraphView.cpp @@ -7,6 +7,7 @@ #include "Graph.hpp" #include +#include #include 
#include @@ -74,20 +75,20 @@ SubgraphView::SubgraphView(SubgraphView&& subgraph) SubgraphView::SubgraphView(IConnectableLayer* layer) : m_InputSlots{} , m_OutputSlots{} - , m_Layers{boost::polymorphic_downcast(layer)} + , m_Layers{PolymorphicDowncast(layer)} { unsigned int numInputSlots = layer->GetNumInputSlots(); m_InputSlots.resize(numInputSlots); for (unsigned int i = 0; i < numInputSlots; i++) { - m_InputSlots.at(i) = boost::polymorphic_downcast(&(layer->GetInputSlot(i))); + m_InputSlots.at(i) = PolymorphicDowncast(&(layer->GetInputSlot(i))); } unsigned int numOutputSlots = layer->GetNumOutputSlots(); m_OutputSlots.resize(numOutputSlots); for (unsigned int i = 0; i < numOutputSlots; i++) { - m_OutputSlots.at(i) = boost::polymorphic_downcast(&(layer->GetOutputSlot(i))); + m_OutputSlots.at(i) = PolymorphicDowncast(&(layer->GetOutputSlot(i))); } CheckSubgraph(); diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp index fa2fad9d4e..96e75abf8b 100644 --- a/src/armnn/SubgraphViewSelector.cpp +++ b/src/armnn/SubgraphViewSelector.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -267,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos, for (auto inputSlot : layer.GetInputSlots()) { - auto connectedInput = boost::polymorphic_downcast(inputSlot.GetConnection()); + auto connectedInput = PolymorphicDowncast(inputSlot.GetConnection()); ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected."); Layer& inputLayer = connectedInput->GetOwningLayer(); diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 5df5ec8de5..b51303b7ee 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -6,6 +6,7 @@ #include "LayerCloneBase.hpp" #include +#include #include #include @@ -118,7 +119,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory) if (inputLayer.GetType() == LayerType::Concat) { // Continue with the substitution if the connected inputs are also concat layers - m_ConcatLayers.push(boost::polymorphic_downcast(&inputLayer)); + m_ConcatLayers.push(PolymorphicDowncast(&inputLayer)); } ++i; } diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp index 4984cf26ee..f2057d48e9 100644 --- a/src/armnn/layers/PermuteLayer.hpp +++ b/src/armnn/layers/PermuteLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include + namespace armnn { @@ -45,7 +47,7 @@ public: bool IsInverse(const Layer& other) const { return (other.GetType() == LayerType::Permute) && - GetPermutation().IsInverse(boost::polymorphic_downcast(&other)->GetPermutation()); + GetPermutation().IsInverse(PolymorphicDowncast(&other)->GetPermutation()); } /// Indicates if the other layer received is equal to this one. 
@@ -54,7 +56,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Permute) && - GetPermutation().IsEqual(boost::polymorphic_downcast(&other)->GetPermutation()); + GetPermutation().IsEqual(PolymorphicDowncast(&other)->GetPermutation()); } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp index 4fd5c3e11d..5e0e883822 100644 --- a/src/armnn/layers/ReshapeLayer.hpp +++ b/src/armnn/layers/ReshapeLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include + namespace armnn { @@ -39,7 +41,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Reshape) && - m_Param.m_TargetShape == boost::polymorphic_downcast(&other)->m_Param.m_TargetShape; + m_Param.m_TargetShape == PolymorphicDowncast(&other)->m_Param.m_TargetShape; } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp index 4906bc9412..a668ce835e 100644 --- a/src/armnn/layers/TransposeLayer.hpp +++ b/src/armnn/layers/TransposeLayer.hpp @@ -6,6 +6,8 @@ #include "LayerWithParameters.hpp" +#include + namespace armnn { @@ -43,7 +45,7 @@ public: bool IsInverse(const Layer& other) const { return (other.GetType() == LayerType::Transpose) && - GetPermutation().IsInverse(boost::polymorphic_downcast(&other)->GetPermutation()); + GetPermutation().IsInverse(PolymorphicDowncast(&other)->GetPermutation()); } /// Indicates if the other layer received is equal to this one. @@ -52,7 +54,7 @@ public: bool IsEqual(const Layer& other) const { return (other.GetType() == LayerType::Transpose) && - GetPermutation().IsEqual(boost::polymorphic_downcast(&other)->GetPermutation()); + GetPermutation().IsEqual(PolymorphicDowncast(&other)->GetPermutation()); } void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp index 222414c8c5..ca42cacb39 100644 --- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp +++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp @@ -7,6 +7,8 @@ #include "NetworkUtils.hpp" #include "Optimization.hpp" +#include + namespace armnn { namespace optimizations @@ -15,7 +17,7 @@ namespace optimizations template inline LayerT* ConvertWeight(Layer* l) { - LayerT* layer = boost::polymorphic_downcast(l); + LayerT* layer = PolymorphicDowncast(l); if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected) && layer->m_Weight) { diff --git a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp index e598deb977..66fffbb280 100644 --- a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp +++ b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp @@ -7,6 +7,8 @@ #include "Optimization.hpp" +#include + namespace armnn { namespace optimizations @@ -24,8 +26,8 @@ public: ARMNN_ASSERT(base.GetType() == LayerType::Pad); ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d); - PadLayer* padLayer = boost::polymorphic_downcast(&base); - Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast(&child); + PadLayer* padLayer = PolymorphicDowncast(&base); + Convolution2dLayer* convolution2dLayer = PolymorphicDowncast(&child); OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot(); const TensorInfo& outInfo = child.GetOutputHandler().GetTensorInfo(); diff --git 
a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp
index a7a477be84..15c6f61e97 100644
--- a/src/armnn/optimizations/MovePermuteUp.hpp
+++ b/src/armnn/optimizations/MovePermuteUp.hpp
@@ -6,6 +6,7 @@
 
 #include "Optimization.hpp"
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnnUtils/Permute.hpp>
 
 namespace armnn
@@ -29,7 +30,7 @@ public:
 
         if (CanMovePermuteToInputs(base))
         {
-            auto permute = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer());
+            auto permute = PolymorphicDowncast<PermuteLayer*>(&connection.GetOwningLayer());
             const PermutationVector& perm = permute->GetPermutation();
 
             // Inserts an equivalent permute before every input of the base layer.

diff --git a/src/armnn/optimizations/MoveTransposeUp.hpp b/src/armnn/optimizations/MoveTransposeUp.hpp
index 66543069c8..86c018868e 100644
--- a/src/armnn/optimizations/MoveTransposeUp.hpp
+++ b/src/armnn/optimizations/MoveTransposeUp.hpp
@@ -6,6 +6,7 @@
 
 #include "Optimization.hpp"
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnnUtils/Transpose.hpp>
 
 namespace armnn
@@ -29,7 +30,7 @@ public:
 
         if (CanMoveTransposeToInputs(base))
        {
-            auto transpose = boost::polymorphic_downcast<TransposeLayer*>(&connection.GetOwningLayer());
+            auto transpose = PolymorphicDowncast<TransposeLayer*>(&connection.GetOwningLayer());
             const PermutationVector& perm = transpose->GetPermutation();
 
             // Inserts an equivalent transpose before every input of the base layer.

diff --git a/src/armnn/optimizations/Optimization.hpp b/src/armnn/optimizations/Optimization.hpp
index efe3930db8..565f543bee 100644
--- a/src/armnn/optimizations/Optimization.hpp
+++ b/src/armnn/optimizations/Optimization.hpp
@@ -7,6 +7,8 @@
 
 #include "Graph.hpp"
 #include "LayersFwd.hpp"
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 namespace armnn
 {
@@ -37,7 +39,7 @@ public:
     {
         if (base.GetType() == LayerEnumOf<BaseType>())
         {
-            Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(&base));
+            Wrapped::Run(graph, *PolymorphicDowncast<BaseType*>(&base));
         }
     }

diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
index 98e87c36c6..fe0b312ce0 100644
--- a/src/armnn/optimizations/OptimizeInversePermutes.hpp
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -7,6 +7,7 @@
 
 #include "Optimization.hpp"
 
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 namespace armnn
@@ -23,9 +24,9 @@ public:
     {
         IgnoreUnused(graph);
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
-        auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
+        auto child = PolymorphicDowncast<PermuteType*>(&connection.GetOwningLayer());
 
-        if (child->IsInverse(*boost::polymorphic_downcast<PermuteType*>(&base)))
+        if (child->IsInverse(*PolymorphicDowncast<PermuteType*>(&base)))
         {
             // Bypass both layers. Child will be removed as it's left unconnected.
             // Base layer will be removed if left unconnected.
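Note for reviewers: every call-site change in this patch follows the same checked-downcast pattern, so one standalone sketch covers them all. The CheckedDowncast helper and the Base/Derived types below are illustrative stand-ins, not Arm NN code; the real utility is armnn::PolymorphicDowncast in include/armnn/utility/PolymorphicDowncast.hpp (first hunk of this patch), whose debug-time check is spelled ARMNN_POLYMORPHIC_CAST_CHECK rather than assert.

#include <cassert>
#include <type_traits>

// Illustrative stand-in for armnn::PolymorphicDowncast: in release builds it
// costs the same as a plain static_cast; with assertions enabled it verifies
// that the pointed-to object really has the destination dynamic type.
template <typename DestType, typename SourceType>
DestType CheckedDowncast(SourceType value)
{
    static_assert(std::is_pointer<SourceType>::value &&
                  std::is_pointer<DestType>::value,
                  "CheckedDowncast only works with pointer types.");
    assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
    return static_cast<DestType>(value);
}

struct Base    { virtual ~Base() = default; };
struct Derived : Base { int payload = 42; };

int main()
{
    Derived derived;
    Base* base = &derived;

    // Valid downcast: base actually points at a Derived.
    Derived* d = CheckedDowncast<Derived*>(base);
    return d->payload == 42 ? 0 : 1;
}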
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp index bac27c06a7..d836a9c549 100644 --- a/src/armnn/optimizations/SquashEqualSiblings.hpp +++ b/src/armnn/optimizations/SquashEqualSiblings.hpp @@ -7,6 +7,7 @@ #include "Optimization.hpp" #include +#include namespace armnn { @@ -32,7 +33,7 @@ public: if (baseOutput.GetNumConnections() > 1) { - auto& comparableChild = *boost::polymorphic_downcast(&child); + auto& comparableChild = *PolymorphicDowncast(&child); Layer* lowestPriorityChild = &child; for (auto&& it : baseOutput.GetConnections()) diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index 72ad9d45ef..b6ffd216e0 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -12,6 +12,7 @@ #include #include +#include #include #include @@ -34,7 +35,7 @@ template std::unique_ptr MakeAndCheckWorkload(Layer& layer, const IWorkloadFactory& factory) { std::unique_ptr workload = layer.CreateWorkload(factory); - BOOST_TEST(workload.get() == boost::polymorphic_downcast(workload.get()), + BOOST_TEST(workload.get() == PolymorphicDowncast(workload.get()), "Cannot convert to derived class"); std::string reasonIfUnsupported; layer.SetBackendId(factory.GetBackendId()); diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp index 30e5c879ee..8e6af313b7 100644 --- a/src/armnn/test/GraphTests.cpp +++ b/src/armnn/test/GraphTests.cpp @@ -9,6 +9,7 @@ #include #include +#include #include @@ -274,7 +275,7 @@ static std::vector GetEdgeList(const armnn::Graph& graph) const unsigned int numConnections = outputSlot.GetNumConnections(); for (unsigned int c = 0; c < numConnections; ++c) { - auto inputSlot = boost::polymorphic_downcast(outputSlot.GetConnection(c)); + auto inputSlot = armnn::PolymorphicDowncast(outputSlot.GetConnection(c)); edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer()); } } diff --git a/src/armnn/test/GraphUtils.cpp b/src/armnn/test/GraphUtils.cpp index 1f9bb44d3d..36db900a2d 100644 --- a/src/armnn/test/GraphUtils.cpp +++ b/src/armnn/test/GraphUtils.cpp @@ -5,6 +5,8 @@ #include "GraphUtils.hpp" +#include + bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name) { for (auto&& layer : graph) @@ -52,7 +54,7 @@ bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer, const unsigned int numConnections = outputSlot.GetNumConnections(); for (unsigned int c = 0; c < numConnections; ++c) { - auto inputSlot = boost::polymorphic_downcast(outputSlot.GetConnection(c)); + auto inputSlot = armnn::PolymorphicDowncast(outputSlot.GetConnection(c)); if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() && inputSlot->GetSlotIndex() == destSlot) { diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index c7883ffdb8..ca85e11021 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -695,7 +696,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(id, name); - auto inputLayer = boost::polymorphic_downcast(layer); + auto inputLayer = PolymorphicDowncast(layer); BOOST_TEST((inputLayer->GetBackendId() == "MockBackend")); } @@ -704,7 +705,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(id, name); - auto outputLayer = boost::polymorphic_downcast(layer); + auto outputLayer = PolymorphicDowncast(layer); 
BOOST_TEST((outputLayer->GetBackendId() == "MockBackend")); } @@ -713,7 +714,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) const char* name = nullptr) override { IgnoreUnused(activationDescriptor, name); - auto activation = boost::polymorphic_downcast(layer); + auto activation = PolymorphicDowncast(layer); BOOST_TEST((activation->GetBackendId() == "CustomBackend")); } }; @@ -765,7 +766,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest) auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy); - OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast(optNet.get()); + OptimizedNetwork* optNetObjPtr = PolymorphicDowncast(optNet.get()); // Get the optimized graph Graph& optGraph = optNetObjPtr->GetGraph(); diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index ebdfbc5a40..669703ca54 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -190,7 +191,7 @@ private: void VisitLayersTopologically(const INetwork* inputNetwork, ILayerVisitor& visitor) { - auto network = boost::polymorphic_downcast(inputNetwork); + auto network = PolymorphicDowncast(inputNetwork); auto graph = network->GetGraph().TopologicalSort(); VisitLayers(graph, visitor); @@ -346,7 +347,7 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant) { INetworkPtr network = CreateNetworkWithInputOutputLayers(); - armnn::TensorInfo tensorInfo = GetInputTensorInfo(boost::polymorphic_downcast(network.get())); + armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast(network.get())); // Outliers -56 and 98 std::vector inputData({0, 0, 0, -56, 98, 0, 0, 0}); @@ -3033,12 +3034,12 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32)); - TestConnectionPreservation visitor1(boost::polymorphic_downcast(network.get())->GetGraph()); + TestConnectionPreservation visitor1(PolymorphicDowncast(network.get())->GetGraph()); VisitLayersTopologically(network.get(), visitor1); armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network.get()); - armnn::TensorInfo tensorInfo = GetInputTensorInfo(boost::polymorphic_downcast(network.get())); + armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast(network.get())); std::vector inputData({0, 2, 0, 4}); armnn::ConstTensor inputTensor(tensorInfo, inputData.data()); @@ -3049,7 +3050,7 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant) INetworkPtr quantNetwork = quantizer->ExportNetwork(); - TestConnectionPreservation visitor2(boost::polymorphic_downcast(quantNetwork.get())->GetGraph()); + TestConnectionPreservation visitor2(PolymorphicDowncast(quantNetwork.get())->GetGraph()); VisitLayersTopologically(quantNetwork.get(), visitor2); } diff --git a/src/armnn/test/UtilityTests.cpp b/src/armnn/test/UtilityTests.cpp index 7be5c9518a..d5779c1a76 100644 --- a/src/armnn/test/UtilityTests.cpp +++ b/src/armnn/test/UtilityTests.cpp @@ -45,12 +45,12 @@ BOOST_AUTO_TEST_CASE(PolymorphicDowncast) Base* base1 = &child1; auto ptr1 = dynamic_cast(base1); BOOST_CHECK(ptr1 != nullptr); - BOOST_CHECK_NO_THROW(polymorphic_downcast(base1)); - BOOST_CHECK(polymorphic_downcast(base1) == ptr1); + BOOST_CHECK_NO_THROW(armnn::PolymorphicDowncast(base1)); + 
BOOST_CHECK(armnn::PolymorphicDowncast(base1) == ptr1); auto ptr2 = dynamic_cast(base1); BOOST_CHECK(ptr2 == nullptr); - BOOST_CHECK_THROW(polymorphic_downcast(base1), std::bad_cast); + BOOST_CHECK_THROW(armnn::PolymorphicDowncast(base1), std::bad_cast); armnn::IgnoreUnused(ptr1, ptr2); } diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 2975675ff1..42b0052b03 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -21,10 +21,7 @@ #include #include -#include -#include #include -#include #include #include diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp index f12b2b94d6..cb4173a620 100644 --- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp +++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp @@ -4,16 +4,16 @@ // #include "../TfLiteParser.hpp" - -#include -#include "test/GraphUtils.hpp" - #include "ParserFlatbuffersFixture.hpp" #include "ParserPrototxtFixture.hpp" #include "ParserHelper.hpp" +#include "test/GraphUtils.hpp" +#include #include +#include + BOOST_AUTO_TEST_SUITE(TensorflowLiteParser) struct DetectionPostProcessFixture : ParserFlatbuffersFixture @@ -241,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec()); - auto optimizedNetwork = boost::polymorphic_downcast(optimized.get()); + auto optimizedNetwork = armnn::PolymorphicDowncast(optimized.get()); auto graph = optimizedNetwork->GetGraph(); // Check the number of layers in the graph diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp index 21392ace02..dd77bcacb1 100644 --- a/src/armnnTfLiteParser/test/Unsupported.cpp +++ b/src/armnnTfLiteParser/test/Unsupported.cpp @@ -8,10 +8,10 @@ #include #include +#include #include -#include #include #include @@ -47,7 +47,7 @@ public: BOOST_CHECK(descriptor.m_NumOutputs == numOutputs); BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs); - const StandInLayer* standInLayer = boost::polymorphic_downcast(layer); + const StandInLayer* standInLayer = PolymorphicDowncast(layer); for (unsigned int i = 0u; i < numInputs; ++i) { const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot(); diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp index 491a9648cd..7a7c5a4375 100755 --- a/src/armnnTfParser/TfParser.cpp +++ b/src/armnnTfParser/TfParser.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -21,11 +22,9 @@ #include -#include #include #include -#include - +#include #include using namespace armnnUtils; @@ -1220,7 +1219,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef, % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* weightNode = - boost::polymorphic_downcast *>(inputs[1].m_IndexedValue); + PolymorphicDowncast *>(inputs[1].m_IndexedValue); std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding"); std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format"); @@ -1364,7 +1363,7 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n } ParsedConstTfOperation* weightNode = - boost::polymorphic_downcast *>(inputs[1].m_IndexedValue); + PolymorphicDowncast *>(inputs[1].m_IndexedValue); std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, 
"padding"); std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format"); @@ -1578,7 +1577,7 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* scaleNode = - boost::polymorphic_downcast *>(inputs[1].m_IndexedValue); + PolymorphicDowncast *>(inputs[1].m_IndexedValue); if (!HasParsedConstTensor(inputs[2].m_IndexedValue->GetNode().name())) { @@ -1592,7 +1591,7 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* offsetNode = - boost::polymorphic_downcast *>(inputs[2].m_IndexedValue); + PolymorphicDowncast *>(inputs[2].m_IndexedValue); if (!HasParsedConstTensor(inputs[3].m_IndexedValue->GetNode().name())) { @@ -1606,7 +1605,7 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* meanNode = - boost::polymorphic_downcast *>(inputs[3].m_IndexedValue); + PolymorphicDowncast *>(inputs[3].m_IndexedValue); if (!HasParsedConstTensor(inputs[4].m_IndexedValue->GetNode().name())) { @@ -1620,7 +1619,7 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* varianceNode = - boost::polymorphic_downcast *>(inputs[4].m_IndexedValue); + PolymorphicDowncast *>(inputs[4].m_IndexedValue); const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC"); CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm"); @@ -1689,7 +1688,7 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef if (HasParsedConstTensor(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name())) { ParsedConstTfOperation* alpha = - boost::polymorphic_downcast *>( + PolymorphicDowncast *>( inputs[alphaLayerIndex].m_IndexedValue); std::vector const_data; @@ -2079,7 +2078,7 @@ ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef const auto constInput = inputs[GetConstInputIndex(inputs)]; auto* permuteVectorInput = - boost::polymorphic_downcast*>(constInput.m_IndexedValue); + PolymorphicDowncast*>(constInput.m_IndexedValue); const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo(); std::vector permuteVectorData; @@ -2177,7 +2176,7 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef, } ParsedConstTfOperation* paddingTensorOp = - boost::polymorphic_downcast*>(inputs[1].m_IndexedValue); + PolymorphicDowncast*>(inputs[1].m_IndexedValue); std::vector paddingTensorData; ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData); @@ -2244,7 +2243,7 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef, unsigned int index = GetConstInputIndex(inputs); // Get the axis tensor data ParsedConstTfOperation* shapeNode = - boost::polymorphic_downcast*>(inputs[index].m_IndexedValue); + PolymorphicDowncast*>(inputs[index].m_IndexedValue); std::vector axisTensorData; shapeNode->GetConstTensor(axisTensorData); @@ -2377,7 +2376,7 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef, % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* shapeNode = - boost::polymorphic_downcast*>(inputs[1].m_IndexedValue); + PolymorphicDowncast*>(inputs[1].m_IndexedValue); armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index); TensorInfo inputTensorInfo = 
prevLayerOutputSlot.GetTensorInfo(); @@ -2415,7 +2414,7 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no % CHECK_LOCATION().AsString())); } ParsedConstTfOperation* sizeNode = - boost::polymorphic_downcast*>(inputs[1].m_IndexedValue); + PolymorphicDowncast*>(inputs[1].m_IndexedValue); // Checks the align_corners attribute is not set. if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false)) @@ -2630,7 +2629,7 @@ ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, con bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims"); ParsedConstTfOperation* axisNode = - boost::polymorphic_downcast*>(inputs[1].m_IndexedValue); + PolymorphicDowncast*>(inputs[1].m_IndexedValue); const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo(); @@ -2810,7 +2809,7 @@ ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef, unsigned int index = GetConstInputIndex(inputs); // Get the axis tensor data ParsedConstTfOperation* shapeNode = - boost::polymorphic_downcast*>(inputs[index].m_IndexedValue); + PolymorphicDowncast*>(inputs[index].m_IndexedValue); std::vector axisTensorData; shapeNode->GetConstTensor(axisTensorData); @@ -2913,17 +2912,17 @@ ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& node std::vector inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs); ParsedConstTfOperation* beginNode = - boost::polymorphic_downcast *>(inputs[1].m_IndexedValue); + PolymorphicDowncast *>(inputs[1].m_IndexedValue); std::vector beginTensorData; beginNode->GetConstTensor(beginTensorData); ParsedConstTfOperation* endNode = - boost::polymorphic_downcast *>(inputs[2].m_IndexedValue); + PolymorphicDowncast *>(inputs[2].m_IndexedValue); std::vector endTensorData; endNode->GetConstTensor(endTensorData); ParsedConstTfOperation* stridesNode = - boost::polymorphic_downcast *>(inputs[3].m_IndexedValue); + PolymorphicDowncast *>(inputs[3].m_IndexedValue); std::vector stridesTensorData; stridesNode->GetConstTensor(stridesTensorData); @@ -3311,11 +3310,11 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m // Finds our inputs. 
if (HasParsedConstTensor(addInputs[0].m_IndexedValue->GetNode().name())) { - biasNode = boost::polymorphic_downcast*>(addInputs[0].m_IndexedValue); + biasNode = PolymorphicDowncast*>(addInputs[0].m_IndexedValue); } else if (HasParsedConstTensor(addInputs[1].m_IndexedValue->GetNode().name())) { - biasNode = boost::polymorphic_downcast*>(addInputs[1].m_IndexedValue); + biasNode = PolymorphicDowncast*>(addInputs[1].m_IndexedValue); } else { @@ -3339,13 +3338,13 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m std::vector mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2); if (HasParsedConstTensor(mulInputs[0].m_IndexedValue->GetNode().name())) { - weightNode = boost::polymorphic_downcast*>(mulInputs[0].m_IndexedValue); + weightNode = PolymorphicDowncast*>(mulInputs[0].m_IndexedValue); inputNode = mulInputs[1].m_IndexedValue; inputIdx = mulInputs[1].m_Index; } else if (HasParsedConstTensor(mulInputs[1].m_IndexedValue->GetNode().name())) { - weightNode = boost::polymorphic_downcast*>(mulInputs[1].m_IndexedValue); + weightNode = PolymorphicDowncast*>(mulInputs[1].m_IndexedValue); inputNode = mulInputs[0].m_IndexedValue; inputIdx = mulInputs[0].m_Index; } diff --git a/src/armnnTfParser/test/Assert.cpp b/src/armnnTfParser/test/Assert.cpp index 111f158641..b978f0264d 100644 --- a/src/armnnTfParser/test/Assert.cpp +++ b/src/armnnTfParser/test/Assert.cpp @@ -7,6 +7,8 @@ #include "ParserPrototxtFixture.hpp" #include "test/GraphUtils.hpp" +#include + #include BOOST_AUTO_TEST_SUITE(TensorflowParser) @@ -100,7 +102,7 @@ BOOST_FIXTURE_TEST_CASE(AssertSimpleGraphStructureTest, AssertSimpleFixture) { auto optimized = SetupOptimizedNetwork({ { "Placeholder", { 1, 1, 1, 4 } } }, { "Add" }); - auto optimizedNetwork = boost::polymorphic_downcast(optimized.get()); + auto optimizedNetwork = armnn::PolymorphicDowncast(optimized.get()); auto graph = optimizedNetwork->GetGraph(); BOOST_TEST((graph.GetNumInputs() == 1)); @@ -256,7 +258,7 @@ BOOST_FIXTURE_TEST_CASE(AssertGraphStructureTest, AssertFixture) { "Input1", { 1, 1, 2, 2 } } }, { "Output" }); - auto optimizedNetwork = boost::polymorphic_downcast(optimized.get()); + auto optimizedNetwork = armnn::PolymorphicDowncast(optimized.get()); auto graph = optimizedNetwork->GetGraph(); BOOST_TEST((graph.GetNumInputs() == 2)); diff --git a/src/backends/aclCommon/BaseMemoryManager.cpp b/src/backends/aclCommon/BaseMemoryManager.cpp index b43eaf8da3..aaadc9479a 100644 --- a/src/backends/aclCommon/BaseMemoryManager.cpp +++ b/src/backends/aclCommon/BaseMemoryManager.cpp @@ -10,7 +10,6 @@ #include "arm_compute/runtime/OffsetLifetimeManager.h" #endif -#include namespace armnn { diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp index 83cec2a746..b14e148287 100644 --- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp +++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp @@ -6,6 +6,7 @@ #include +#include #include #include #include @@ -93,8 +94,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory) MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData(); BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1); BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor1.m_Inputs[0]); - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor1.m_Outputs[0]); + auto inputHandle1 = PolymorphicDowncast(queueDescriptor1.m_Inputs[0]); + auto outputHandle1 = 
PolymorphicDowncast(queueDescriptor1.m_Outputs[0]); BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); BOOST_TEST(CompareTensorHandleShape(outputHandle1, {2, 3})); @@ -102,8 +103,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory) MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData(); BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1); BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1); - auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor2.m_Inputs[0]); - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor2.m_Outputs[0]); + auto inputHandle2 = PolymorphicDowncast(queueDescriptor2.m_Inputs[0]); + auto outputHandle2 = PolymorphicDowncast(queueDescriptor2.m_Outputs[0]); BOOST_TEST(CompareTensorHandleShape(inputHandle2, {2, 3})); BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); } diff --git a/src/backends/backendsCommon/MemCopyWorkload.cpp b/src/backends/backendsCommon/MemCopyWorkload.cpp index 572c0fcc57..c1aa79cb10 100644 --- a/src/backends/backendsCommon/MemCopyWorkload.cpp +++ b/src/backends/backendsCommon/MemCopyWorkload.cpp @@ -8,7 +8,7 @@ #include #include -#include +#include #include @@ -27,9 +27,9 @@ void GatherTensorHandlePairs(const MemCopyQueueDescriptor& descriptor, for (unsigned int i = 0; i < numInputs; ++i) { - SrcTensorHandleType* const srcTensorHandle = boost::polymorphic_downcast( + SrcTensorHandleType* const srcTensorHandle = PolymorphicDowncast( descriptor.m_Inputs[i]); - DstTensorHandleType* const dstTensorHandle = boost::polymorphic_downcast( + DstTensorHandleType* const dstTensorHandle = PolymorphicDowncast( descriptor.m_Outputs[i]); tensorHandlePairs.emplace_back(srcTensorHandle, dstTensorHandle); diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index a7e8576668..c55c70a1f7 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -18,7 +19,6 @@ #include -#include #include #include @@ -49,7 +49,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, { Optional reason = outReasonIfUnsupported; bool result; - const Layer& layer = *(boost::polymorphic_downcast(&connectableLayer)); + const Layer& layer = *(PolymorphicDowncast(&connectableLayer)); auto const& backendRegistry = BackendRegistryInstance(); if (!backendRegistry.IsBackendRegistered(backendId)) @@ -70,7 +70,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, { case LayerType::Activation: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsActivationSupported( @@ -94,7 +94,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::ArgMinMax: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -108,7 +108,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::BatchNormalization: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const 
TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo(); @@ -130,7 +130,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), @@ -140,7 +140,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Comparison: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); @@ -189,7 +189,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Convolution2d: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); @@ -227,7 +227,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::DepthToSpace: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -240,7 +240,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::DepthwiseConvolution2d: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); @@ -277,7 +277,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::DetectionPostProcess: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo(); @@ -301,7 +301,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::ElementwiseUnary: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -314,7 +314,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::FakeQuantization: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType), cLayer->GetParameters(), @@ -332,7 +332,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case 
LayerType::FullyConnected: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); @@ -414,7 +414,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::InstanceNormalization: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -429,7 +429,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::L2Normalization: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const L2NormalizationDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -444,7 +444,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::LogSoftmax: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -457,7 +457,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Lstm: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const LstmDescriptor& descriptor = cLayer->GetParameters(); // All inputs. @@ -645,7 +645,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Concat: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. 
auto getTensorInfo = [&dataType](const InputSlot& slot) @@ -685,7 +685,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Normalization: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType), @@ -702,7 +702,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Permute: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType), @@ -713,7 +713,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Pad: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsPadSupported( @@ -725,7 +725,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Pooling2d: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType), @@ -736,7 +736,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::PreCompiled: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType), cLayer->GetParameters(), @@ -752,7 +752,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::QLstm: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const QLstmDescriptor& descriptor = cLayer->GetParameters(); // Inputs @@ -840,7 +840,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::QuantizedLstm: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); // Inputs const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -904,7 +904,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Reshape: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType), @@ -915,7 +915,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Resize: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = 
layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType), @@ -926,7 +926,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Slice: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -939,7 +939,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Softmax: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType), @@ -950,7 +950,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::SpaceToBatchNd: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType), @@ -961,7 +961,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::SpaceToDepth: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); @@ -974,7 +974,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Splitter: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); // Get vector of all outputs. @@ -996,7 +996,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::Stack: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. auto getTensorInfo = [&dataType](const InputSlot& slot) @@ -1023,7 +1023,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, } case LayerType::StandIn: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. 
@@ -1023,7 +1023,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         case LayerType::StandIn:
         {
-            auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
+            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);

             // Get vector of all inputs.
             auto getTensorInfoIn = [&dataType](const InputSlot& slot)
@@ -1064,7 +1064,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         case LayerType::StridedSlice:
         {
-            auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
+            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
@@ -1100,7 +1100,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         case LayerType::Mean:
         {
-            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsMeanSupported(
@@ -1134,7 +1134,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         case LayerType::Transpose:
         {
-            auto cLayer = boost::polymorphic_downcast<const TransposeLayer*>(&layer);
+            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
@@ -1145,7 +1145,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         case LayerType::TransposeConvolution2d:
         {
-            auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
+            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

             const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
@@ -1188,7 +1188,7 @@ bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
-    auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
+    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
     return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
 }

diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index bd5e81e678..37915cfc4d 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -7,6 +7,8 @@

 #include <...>

+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 namespace armnn
 {

diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index a4da924725..354362ec8f 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -8,15 +8,13 @@

 #include "CpuTensorHandle.hpp"

 #include <...>
-
 #include <...>
-
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <...>
 #include <...>
 #include <...>
-#include <boost/cast.hpp>

 namespace armnn
 {
@@ -198,9 +196,9 @@ void GatherTensorHandlePairs(const DescriptorType& descriptor,
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         SrcTensorHandleType* const srcTensorHandle =
-            boost::polymorphic_downcast<SrcTensorHandleType*>(descriptor.m_Inputs[i]);
+            PolymorphicDowncast<SrcTensorHandleType*>(descriptor.m_Inputs[i]);
         DstTensorHandleType* const dstTensorHandle =
-            boost::polymorphic_downcast<DstTensorHandleType*>(descriptor.m_Outputs[i]);
+            PolymorphicDowncast<DstTensorHandleType*>(descriptor.m_Outputs[i]);

         tensorHandlePairs.emplace_back(srcTensorHandle, dstTensorHandle);
     }
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 1276776a4d..6371e53250 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++
b/src/backends/backendsCommon/test/DynamicBackendTests.hpp @@ -6,15 +6,12 @@ #pragma once #include -#include - #include - -#include +#include +#include #include - +#include #include - #include #include @@ -1212,7 +1209,7 @@ void RuntimeEmptyTestImpl() IRuntime::CreationOptions creationOptions; IRuntimePtr runtime = IRuntime::Create(creationOptions); - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.empty()); @@ -1253,7 +1250,7 @@ void RuntimeDynamicBackendsTestImpl() BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end())); } - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size()); for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds) @@ -1294,7 +1291,7 @@ void RuntimeDuplicateDynamicBackendsTestImpl() BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end())); } - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size()); for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds) @@ -1323,7 +1320,7 @@ void RuntimeInvalidDynamicBackendsTestImpl() const BackendRegistry& backendRegistry = BackendRegistryInstance(); BOOST_TEST(backendRegistry.Size() == 0); - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.empty()); } @@ -1343,7 +1340,7 @@ void RuntimeInvalidOverridePathTestImpl() const BackendRegistry& backendRegistry = BackendRegistryInstance(); BOOST_TEST(backendRegistry.Size() == 0); - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.empty()); } @@ -1382,7 +1379,7 @@ void CreateReferenceDynamicBackendTestImpl() BackendIdSet backendIds = backendRegistry.GetBackendIds(); BOOST_TEST((backendIds.find("CpuRef") != backendIds.end())); - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.size() == 1); BOOST_TEST((supportedBackendIds.find("CpuRef") != supportedBackendIds.end())); @@ -1433,7 +1430,7 @@ void CreateReferenceDynamicBackendTestImpl() // Create a convolution workload with the dummy settings auto workload = referenceWorkloadFactory->CreateConvolution2d(convolution2dQueueDescriptor, workloadInfo); BOOST_TEST((workload != nullptr)); - BOOST_TEST(workload.get() == boost::polymorphic_downcast(workload.get())); + 
BOOST_TEST(workload.get() == PolymorphicDowncast(workload.get())); } #endif @@ -1453,7 +1450,7 @@ void CreateSampleDynamicBackendTestImpl() BackendIdSet backendIds = backendRegistry.GetBackendIds(); BOOST_TEST((backendIds.find("SampleDynamic") != backendIds.end())); - const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec()); + const DeviceSpec& deviceSpec = *PolymorphicDowncast(&runtime->GetDeviceSpec()); BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends(); BOOST_TEST(supportedBackendIds.size()>= 1); BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end())); diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp index 3aebe3e964..c972b4b15f 100644 --- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp +++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp @@ -3,15 +3,19 @@ // SPDX-License-Identifier: MIT // -#include + +#include "CommonTestUtils.hpp" +#include "MockBackend.hpp" + +#include +#include #include +#include #include #include -#include -#include -#include "CommonTestUtils.hpp" -#include "MockBackend.hpp" +#include + using namespace armnn; @@ -208,7 +212,7 @@ BOOST_AUTO_TEST_CASE(OptimizeViewsValidateDeviceMockBackend) BOOST_CHECK(optNet); // Check the optimised graph - OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast(optNet.get()); + OptimizedNetwork* optNetObjPtr = PolymorphicDowncast(optNet.get()); CheckLayers(optNetObjPtr->GetGraph()); } diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp index f612c3743d..bfe93bdc01 100644 --- a/src/backends/cl/ClBackendContext.cpp +++ b/src/backends/cl/ClBackendContext.cpp @@ -8,14 +8,13 @@ #include #include +#include #include #include #include #include -#include - namespace armnn { @@ -161,7 +160,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options) bool useLegacyTunerAPI = options.m_GpuAccTunedParameters.get() != nullptr; if (useLegacyTunerAPI) { - auto clTunerParams = boost::polymorphic_downcast( + auto clTunerParams = PolymorphicDowncast( options.m_GpuAccTunedParameters.get()); tuner = &clTunerParams->m_Tuner; diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp index 9df3f1a4a6..8af97f41e2 100644 --- a/src/backends/cl/ClTensorHandleFactory.cpp +++ b/src/backends/cl/ClTensorHandleFactory.cpp @@ -7,12 +7,12 @@ #include "ClTensorHandleFactory.hpp" #include "ClTensorHandle.hpp" +#include + #include #include #include -#include - namespace armnn { @@ -42,7 +42,7 @@ std::unique_ptr ClTensorHandleFactory::CreateSubTensorHandle(ITen } return std::make_unique( - boost::polymorphic_downcast(&parent), shape, coords); + PolymorphicDowncast(&parent), shape, coords); } std::unique_ptr ClTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index b1bd46c4d7..b0d2fdf835 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -24,7 +25,6 @@ #include #include -#include #include namespace armnn @@ -125,7 +125,7 @@ std::unique_ptr ClWorkloadFactory::CreateSubTensorHandle(ITensorH } return std::make_unique( - boost::polymorphic_downcast(&parent), shape, coords); + PolymorphicDowncast(&parent), shape, coords); } std::unique_ptr 
ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp index 92e771760f..b09b26f9b3 100644 --- a/src/backends/cl/test/ClCreateWorkloadTests.cpp +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -6,6 +6,7 @@ #include "ClContextControlFixture.hpp" #include "ClWorkloadFactoryHelper.hpp" +#include #include #include @@ -35,8 +36,8 @@ static void ClCreateActivationWorkloadTest() // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). ActivationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1})); @@ -66,9 +67,9 @@ static void ClCreateElementwiseWorkloadTest() // Checks that inputs/outputs are as we expect them (see definition of CreateElementwiseWorkloadTest). DescriptorType queueDescriptor = workload->GetData(); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3})); BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); @@ -159,8 +160,8 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op) DescriptorType queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); @@ -184,8 +185,8 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). 
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); switch (dataLayout) { @@ -232,8 +233,8 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload) auto workload = CreateConvertFp16ToFp32WorkloadTest(factory, graph); ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3})); @@ -250,8 +251,8 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload) auto workload = CreateConvertFp32ToFp16WorkloadTest(factory, graph); ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3})); @@ -277,8 +278,8 @@ static void ClConvolution2dWorkloadTest(DataLayout dataLayout) // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST((inputHandle->GetShape() == inputShape)); BOOST_TEST((outputHandle->GetShape() == outputShape)); } @@ -315,8 +316,8 @@ static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest). DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list({ 2, 2, 5, 5 }) : std::initializer_list({ 2, 5, 5, 2 }); @@ -343,8 +344,8 @@ static void ClDirectConvolution2dWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest). 
Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6})); } @@ -376,8 +377,8 @@ static void ClCreateFullyConnectedWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7})); } @@ -404,8 +405,8 @@ static void ClNormalizationWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list({3, 5, 5, 1}) : std::initializer_list({3, 1, 5, 5}); @@ -452,8 +453,8 @@ static void ClPooling2dWorkloadTest(DataLayout dataLayout) // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest). Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST((inputHandle->GetShape() == inputShape)); BOOST_TEST((outputHandle->GetShape() == outputShape)); @@ -497,9 +498,9 @@ static void ClCreatePreluWorkloadTest(const armnn::TensorShape& inputShape, // Checks that outputs and inputs are as we expect them (see definition of CreatePreluWorkloadTest). PreluQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto alphaHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto alphaHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST((inputHandle->GetShape() == inputShape)); BOOST_TEST((alphaHandle->GetShape() == alphaShape)); @@ -532,8 +533,8 @@ static void ClCreateReshapeWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). 
ReshapeQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4})); @@ -565,8 +566,8 @@ static void ClSoftmaxWorkloadTest() // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload). SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); @@ -594,16 +595,16 @@ static void ClSplitterWorkloadTest() // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7})); - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + auto outputHandle1 = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7})); - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); + auto outputHandle2 = PolymorphicDowncast(queueDescriptor.m_Outputs[2]); BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7})); - auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto outputHandle0 = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7})); } @@ -738,8 +739,8 @@ static void ClL2NormalizationWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? 
std::initializer_list({ 5, 20, 50, 67 }) : std::initializer_list({ 5, 50, 67, 20 }); @@ -780,8 +781,8 @@ static void ClCreateLstmWorkloadTest() auto workload = CreateLstmWorkloadTest(factory, graph); LstmQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 })); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 })); } @@ -802,8 +803,8 @@ static void ClResizeWorkloadTest(DataLayout dataLayout) auto queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); switch (dataLayout) { @@ -859,8 +860,8 @@ static void ClMeanWorkloadTest() // Checks that inputs/outputs are as we expect them (see definition of CreateMeanWorkloadTest). MeanQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); // The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL. BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 })); @@ -893,9 +894,9 @@ static void ClCreateConcatWorkloadTest(std::initializer_list outpu auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis); ConcatQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle0 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 })); BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 })); @@ -942,8 +943,8 @@ static void ClSpaceToDepthWorkloadTest() auto workload = CreateSpaceToDepthWorkloadTest(factory, graph); SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 })); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 })); @@ -990,10 +991,10 @@ static void ClCreateStackWorkloadTest(const std::initializer_list& StackQueueDescriptor queueDescriptor = workload->GetData(); for (unsigned int i = 0; i < numInputs; ++i) { - auto inputHandle = 
boost::polymorphic_downcast(queueDescriptor.m_Inputs[i]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[i]); BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); } - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); } @@ -1016,7 +1017,6 @@ template static void ClCreateQuantizedLstmWorkloadTest() { using namespace armnn::armcomputetensorutils; - using boost::polymorphic_downcast; Graph graph; ClWorkloadFactory factory = @@ -1026,23 +1026,23 @@ static void ClCreateQuantizedLstmWorkloadTest() QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData(); - IAclTensorHandle* inputHandle = polymorphic_downcast(queueDescriptor.m_Inputs[0]); + IAclTensorHandle* inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2}))); BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8)); - IAclTensorHandle* cellStateInHandle = polymorphic_downcast(queueDescriptor.m_Inputs[1]); + IAclTensorHandle* cellStateInHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16)); - IAclTensorHandle* outputStateInHandle = polymorphic_downcast(queueDescriptor.m_Inputs[2]); + IAclTensorHandle* outputStateInHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[2]); BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8)); - IAclTensorHandle* cellStateOutHandle = polymorphic_downcast(queueDescriptor.m_Outputs[0]); + IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16)); - IAclTensorHandle* outputStateOutHandle = polymorphic_downcast(queueDescriptor.m_Outputs[1]); + IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8)); } diff --git a/src/backends/cl/workloads/ClAbsWorkload.cpp b/src/backends/cl/workloads/ClAbsWorkload.cpp index 058c453c6b..d020eeb344 100644 --- a/src/backends/cl/workloads/ClAbsWorkload.cpp +++ b/src/backends/cl/workloads/ClAbsWorkload.cpp @@ -7,6 +7,8 @@ #include "ClWorkloadUtils.hpp" +#include + #include #include @@ -29,8 +31,8 @@ ClAbsWorkload::ClAbsWorkload(const AbsQueueDescriptor& descriptor, const Workloa { m_Data.ValidateInputsOutputs("ClAbsWorkload", 1, 1); - arm_compute::ICLTensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ICLTensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ICLTensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_AbsLayer.configure(&input, &output); } diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp index cc6333fff9..9f83cd32c3 100644 --- a/src/backends/cl/workloads/ClNegWorkload.cpp +++ b/src/backends/cl/workloads/ClNegWorkload.cpp @@ 
-8,6 +8,7 @@ #include "ClWorkloadUtils.hpp" #include +#include #include @@ -29,8 +30,8 @@ ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, { m_Data.ValidateInputsOutputs("ClNegWorkload", 1, 1); - arm_compute::ICLTensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ICLTensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ICLTensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_NegLayer.configure(&input, &output); } diff --git a/src/backends/cl/workloads/ClRsqrtWorkload.cpp b/src/backends/cl/workloads/ClRsqrtWorkload.cpp index be687595f7..a305a4a919 100644 --- a/src/backends/cl/workloads/ClRsqrtWorkload.cpp +++ b/src/backends/cl/workloads/ClRsqrtWorkload.cpp @@ -8,6 +8,7 @@ #include "ClWorkloadUtils.hpp" #include +#include #include @@ -29,8 +30,8 @@ ClRsqrtWorkload::ClRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, const W { m_Data.ValidateInputsOutputs("ClRsqrtWorkload", 1, 1); - arm_compute::ICLTensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ICLTensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ICLTensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_RsqrtLayer.configure(&input, &output); } diff --git a/src/backends/cl/workloads/ClSliceWorkload.cpp b/src/backends/cl/workloads/ClSliceWorkload.cpp index fa99e7f54d..5ea4c4cefd 100644 --- a/src/backends/cl/workloads/ClSliceWorkload.cpp +++ b/src/backends/cl/workloads/ClSliceWorkload.cpp @@ -8,6 +8,7 @@ #include "ClWorkloadUtils.hpp" #include +#include #include @@ -36,8 +37,8 @@ ClSliceWorkload::ClSliceWorkload(const SliceQueueDescriptor& descriptor, const W { m_Data.ValidateInputsOutputs("ClSliceWorkload", 1, 1); - arm_compute::ICLTensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ICLTensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ICLTensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::Coordinates starts; arm_compute::Coordinates ends; diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp index d541e4ec52..1acb5c64e6 100644 --- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp +++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp @@ -10,7 +10,6 @@ #include #include #include -#include namespace armnn { diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp index 26b14af144..a8b5b81412 100644 --- a/src/backends/neon/NeonTensorHandleFactory.cpp +++ b/src/backends/neon/NeonTensorHandleFactory.cpp @@ -7,6 +7,7 @@ #include "NeonTensorHandle.hpp" #include +#include namespace armnn { @@ -36,7 +37,7 @@ std::unique_ptr NeonTensorHandleFactory::CreateSubTensorHandle(IT } return std::make_unique( - boost::polymorphic_downcast(&parent), shape, coords); + PolymorphicDowncast(&parent), shape, coords); } std::unique_ptr NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 
47f72050a5..b3104b9576 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -69,7 +70,7 @@ std::unique_ptr NeonWorkloadFactory::CreateSubTensorHandle(ITenso } return std::make_unique( - boost::polymorphic_downcast(&parent), shape, coords); + PolymorphicDowncast(&parent), shape, coords); } std::unique_ptr NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo, diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index 3e1888cb54..447bad155f 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -6,6 +6,7 @@ #include "NeonWorkloadFactoryHelper.hpp" #include +#include #include #include @@ -72,8 +73,8 @@ static void NeonCreateActivationWorkloadTest() // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest). ActivationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType))); } @@ -103,9 +104,9 @@ static void NeonCreateElementwiseWorkloadTest() auto workload = CreateElementwiseWorkloadTest(factory, graph); DescriptorType queueDescriptor = workload->GetData(); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType))); @@ -201,8 +202,8 @@ static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest). BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3}; TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3}; @@ -247,8 +248,8 @@ static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayo // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest). 
Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); } @@ -287,8 +288,8 @@ static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list({ 2, 2, 5, 5 }) : std::initializer_list({ 2, 5, 5, 2 }); @@ -322,8 +323,8 @@ static void NeonCreateFullyConnectedWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType))); } @@ -351,8 +352,8 @@ static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout) // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest). NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5}; TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5}; @@ -398,8 +399,8 @@ static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout:: // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest). 
Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); } @@ -449,9 +450,9 @@ static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape, // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). PreluQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto alphaHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto alphaHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType))); BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType))); @@ -485,8 +486,8 @@ static void NeonCreateReshapeWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest). ReshapeQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType))); } @@ -518,8 +519,8 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout) auto queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); switch (dataLayout) { @@ -565,8 +566,8 @@ static void NeonCreateSoftmaxWorkloadTest() // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest). 
SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType))); } @@ -593,8 +594,8 @@ static void NeonSpaceToDepthWorkloadTest() auto workload = CreateSpaceToDepthWorkloadTest(factory, graph); SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType))); @@ -630,16 +631,16 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32))); - auto outputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto outputHandle0 = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32))); - auto outputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + auto outputHandle1 = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32))); - auto outputHandle2 = boost::polymorphic_downcast(queueDescriptor.m_Outputs[2]); + auto outputHandle2 = PolymorphicDowncast(queueDescriptor.m_Outputs[2]); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32))); } @@ -743,8 +744,8 @@ static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout) // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest). L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? 
TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 }; @@ -788,8 +789,8 @@ static void NeonCreateLstmWorkloadTest() LstmQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[1]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32))); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32))); @@ -811,9 +812,9 @@ static void NeonCreateConcatWorkloadTest(std::initializer_list out auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis); ConcatQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto inputHandle0 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); + auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType))); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType))); @@ -871,10 +872,10 @@ static void NeonCreateStackWorkloadTest(const std::initializer_listGetData(); for (unsigned int i = 0; i < numInputs; ++i) { - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[i]); + auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[i]); BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType))); } - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); } @@ -898,8 +899,6 @@ BOOST_AUTO_TEST_CASE(CreateStackUint8Workload) template static void NeonCreateQuantizedLstmWorkloadTest() { - using boost::polymorphic_downcast; - Graph graph; NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager()); @@ -907,23 +906,23 @@ static void NeonCreateQuantizedLstmWorkloadTest() QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData(); - IAclTensorHandle* inputHandle = polymorphic_downcast(queueDescriptor.m_Inputs[0]); + IAclTensorHandle* inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2}))); BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8)); - IAclTensorHandle* cellStateInHandle = polymorphic_downcast(queueDescriptor.m_Inputs[1]); + IAclTensorHandle* cellStateInHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16)); - IAclTensorHandle* outputStateInHandle = polymorphic_downcast(queueDescriptor.m_Inputs[2]); + IAclTensorHandle* outputStateInHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[2]); BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((outputStateInHandle->GetDataType() == 
arm_compute::DataType::QASYMM8)); - IAclTensorHandle* cellStateOutHandle = polymorphic_downcast(queueDescriptor.m_Outputs[0]); + IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16)); - IAclTensorHandle* outputStateOutHandle = polymorphic_downcast(queueDescriptor.m_Outputs[1]); + IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4}))); BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8)); } diff --git a/src/backends/neon/workloads/NeonAbsWorkload.cpp b/src/backends/neon/workloads/NeonAbsWorkload.cpp index 7f8ed5a006..ea14ac3897 100644 --- a/src/backends/neon/workloads/NeonAbsWorkload.cpp +++ b/src/backends/neon/workloads/NeonAbsWorkload.cpp @@ -9,8 +9,7 @@ #include #include - -#include +#include namespace armnn { @@ -28,8 +27,8 @@ NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const Wor { m_Data.ValidateInputsOutputs("NeonAbsWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_AbsLayer.configure(&input, &output); } diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp index 916d67449c..4b2169a6ee 100644 --- a/src/backends/neon/workloads/NeonActivationWorkload.cpp +++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp @@ -5,7 +5,9 @@ #include "NeonActivationWorkload.hpp" #include "NeonWorkloadUtils.hpp" + #include +#include #include @@ -36,8 +38,8 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& const arm_compute::ActivationLayerInfo activationLayerInfo = ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique(); layer->configure(&input, &output, activationLayerInfo); diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp index a025c0b8f5..cb0c8a471f 100644 --- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp +++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp @@ -7,6 +7,7 @@ #include "NeonWorkloadUtils.hpp" #include +#include #include #include @@ -35,9 +36,9 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descri { m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1); - arm_compute::ITensor& input1 = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input2 = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input1 = 
PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input2 = PolymorphicDowncast(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique(); layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE); diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp index 0fa9d43b15..0fb819db0b 100644 --- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp +++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp @@ -10,6 +10,7 @@ #include +#include #include #include @@ -54,8 +55,8 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des const WorkloadInfo& info) : BaseWorkload(descriptor, info) { - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); auto numDims = info.m_InputTensorInfos[0].GetNumDimensions(); auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis); diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp index cd931e3797..ff777dbf9b 100644 --- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp +++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp @@ -7,8 +7,9 @@ #include "NeonWorkloadUtils.hpp" -#include #include +#include +#include #include @@ -53,8 +54,8 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload( { m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp index b9cb807779..1cffbe1448 100644 --- a/src/backends/neon/workloads/NeonConstantWorkload.cpp +++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -41,9 +42,9 @@ void NeonConstantWorkload::Execute() const ARMNN_ASSERT(data.m_LayerOutput != nullptr); arm_compute::ITensor& output = - boost::polymorphic_downcast(data.m_Outputs[0])->GetTensor(); + PolymorphicDowncast(data.m_Outputs[0])->GetTensor(); arm_compute::DataType computeDataType = - boost::polymorphic_downcast(data.m_Outputs[0])->GetDataType(); + PolymorphicDowncast(data.m_Outputs[0])->GetDataType(); switch (computeDataType) { diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp index 5d45642eef..144baec0ca 100644 --- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp @@ -5,8 +5,9 @@ #include 
"NeonConvolution2dWorkload.hpp" -#include #include +#include +#include #include #include @@ -65,8 +66,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload( // todo: check tensor shapes match. - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp index 8b229a1cda..9ae82ff79f 100644 --- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp +++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -32,8 +33,8 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor& { m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); std::unique_ptr layer(new arm_compute::NEDequantizationLayer()); layer->configure(&input, &output); diff --git a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp index 2ed47e4463..36f1cd98de 100644 --- a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp +++ b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp @@ -9,8 +9,7 @@ #include #include - -#include +#include namespace armnn { @@ -85,7 +84,7 @@ NeonDetectionPostProcessWorkload::NeonDetectionPostProcessWorkload( auto AclTensorRef = [](ITensorHandle* tensor) -> arm_compute::ITensor& { - return boost::polymorphic_downcast(tensor)->GetTensor(); + return PolymorphicDowncast(tensor)->GetTensor(); }; arm_compute::ITensor& boxEncodings = AclTensorRef(m_Data.m_Inputs[0]); diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp index 6fdb455f25..fc353f136d 100644 --- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp +++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp @@ -4,7 +4,9 @@ // #include "NeonDivisionWorkload.hpp" + #include +#include #include namespace armnn @@ -29,9 +31,9 @@ NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descri { m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1); - arm_compute::ITensor& input0 = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input1 = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input0 = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_DivLayer.configure(&input0, &input1, &output); } diff 
--git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp index 5b4e9094fd..c49df33a54 100644 --- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp @@ -7,9 +7,9 @@ #include "NeonWorkloadUtils.hpp" -#include +#include -#include +#include namespace armnn { @@ -19,8 +19,8 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descr { m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique<arm_compute::NEFloor>(); layer->configure(&input, &output); diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp index 338c7eb1f6..e808c60c0c 100644 --- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp +++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp @@ -8,6 +8,7 @@ #include "NeonWorkloadUtils.hpp" #include #include +#include #include #include @@ -51,8 +52,8 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue { m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); m_WeightsTensor = std::make_unique<arm_compute::Tensor>(); BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo()); diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp index 9de6c82702..d54607d31e 100644 --- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp @@ -8,6 +8,7 @@ #include "NeonWorkloadUtils.hpp" #include +#include #include @@ -33,8 +34,8 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma { m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp index c433d81973..46d500bfdc 100644 --- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp +++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp @@ -5,6 +5,7 @@ #include "NeonMaximumWorkload.hpp" #include +#include #include namespace armnn @@ -29,9 +30,9 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor&
descripto { m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1); - arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); m_MaxLayer.configure(&input0, &input1, &output); } diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp index 2867a8079f..53e483a182 100644 --- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp +++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp @@ -4,7 +4,9 @@ // #include "NeonMinimumWorkload.hpp" + #include +#include #include namespace armnn @@ -29,9 +31,9 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descripto { m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1); - arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); m_MinLayer.configure(&input0, &input1, &output); } diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp index 66fbedfa63..d813970901 100644 --- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp +++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp @@ -7,6 +7,8 @@ #include "NeonWorkloadUtils.hpp" +#include + #include namespace armnn @@ -37,9 +39,9 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueue { m_Data.ValidateInputsOutputs("NeonMultiplicationWorkload", 2, 1); - arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it, // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp index afe05583fd..06c146754c 100644 --- a/src/backends/neon/workloads/NeonNegWorkload.cpp +++ b/src/backends/neon/workloads/NeonNegWorkload.cpp @@ -9,8 +9,7 @@ #include #include - -#include +#include namespace armnn { @@ -28,8 +27,8 @@ NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descript {
m_Data.ValidateInputsOutputs("NeonNegWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_NegLayer.configure(&input, &output); } diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp index 8cb4ec975d..77fc429b95 100644 --- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp @@ -8,6 +8,7 @@ #include "NeonWorkloadUtils.hpp" #include #include +#include #include @@ -77,8 +78,8 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality."); } - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); output.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp index 9934c29a41..968d5ce02d 100644 --- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp @@ -7,6 +7,8 @@ #include "NeonWorkloadUtils.hpp" +#include + #include #include #include @@ -37,8 +39,8 @@ NeonPooling2dWorkload::NeonPooling2dWorkload( { m_Data.ValidateInputsOutputs("NeonPooling2dWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonPreluWorkload.cpp b/src/backends/neon/workloads/NeonPreluWorkload.cpp index 107090e704..8e6ea301de 100644 --- a/src/backends/neon/workloads/NeonPreluWorkload.cpp +++ b/src/backends/neon/workloads/NeonPreluWorkload.cpp @@ -5,7 +5,9 @@ #include "NeonPreluWorkload.hpp" #include "NeonWorkloadUtils.hpp" + #include +#include #include @@ -31,9 +33,9 @@ NeonPreluWorkload::NeonPreluWorkload(const PreluQueueDescriptor& descriptor, { m_Data.ValidateInputsOutputs("NeonPreluWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& alpha = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& alpha = 
PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique<arm_compute::NEPReluLayer>(); layer->configure(&input, &alpha, &output); diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp index 659bb94723..8b11da7253 100644 --- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp +++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp @@ -7,9 +7,9 @@ #include "NeonWorkloadUtils.hpp" -#include +#include -#include +#include namespace armnn { @@ -29,8 +29,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto { m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique<arm_compute::NEReshapeLayer>(); layer->configure(&input, &output); diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp index e936ab7446..9e3be2655c 100644 --- a/src/backends/neon/workloads/NeonResizeWorkload.cpp +++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp @@ -9,7 +9,9 @@ #include #include +#include #include + #include using namespace armnn::armcomputetensorutils; @@ -45,8 +47,8 @@ NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor, { m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp index b6292833dd..44980df996 100644 --- a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp +++ b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp @@ -9,8 +9,8 @@ #include #include +#include -#include namespace armnn { @@ -28,8 +28,8 @@ NeonRsqrtWorkload::NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, con { m_Data.ValidateInputsOutputs("NeonRsqrtWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); m_RsqrtLayer.configure(&input, &output); } diff --git a/src/backends/neon/workloads/NeonSliceWorkload.cpp b/src/backends/neon/workloads/NeonSliceWorkload.cpp index 171edc6c59..32cc042eab 100644 --- a/src/backends/neon/workloads/NeonSliceWorkload.cpp +++ b/src/backends/neon/workloads/NeonSliceWorkload.cpp @@ -7,6 +7,8 @@ #include "NeonWorkloadUtils.hpp" +#include + #include #include @@ -37,8 +39,8 @@ NeonSliceWorkload::NeonSliceWorkload(const SliceQueueDescriptor&
descriptor, { m_Data.ValidateInputsOutputs("NeonSliceWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::Coordinates starts; arm_compute::Coordinates ends; diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp index 152d19cc04..a4690a7985 100644 --- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp @@ -8,6 +8,8 @@ #include "NeonWorkloadUtils.hpp" #include +#include + #include namespace armnn @@ -20,8 +22,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1); // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions. - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager); unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]); diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp index 15a7066861..05d93b963c 100644 --- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp +++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp @@ -7,6 +7,7 @@ #include "NeonWorkloadUtils.hpp" #include +#include #include @@ -20,8 +21,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& { m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); const auto outputQuantization = output.info()->quantization_info(); diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp index a4204b21e6..2982cd181d 100644 --- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp +++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp @@ -5,6 +5,8 @@ #include "NeonSpaceToDepthWorkload.hpp" #include "NeonWorkloadUtils.hpp" + +#include #include namespace armnn @@ -33,12 +35,12 @@ NeonSpaceToDepthWorkload::NeonSpaceToDepthWorkload(const SpaceToDepthQueueDescri arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); input.info()->set_data_layout(aclDataLayout); int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize); - arm_compute::ITensor&
output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); output.info()->set_data_layout(aclDataLayout); m_Layer.reset(new arm_compute::NESpaceToDepthLayer()); diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp index 224e97af2d..19fa7c6389 100644 --- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp +++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -74,7 +75,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri return; } - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); std::vector<arm_compute::ITensor*> aclOutputs; for (auto output : m_Data.m_Outputs) diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp index 356c0aea83..282005c7cc 100644 --- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp +++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include namespace armnn @@ -50,8 +51,8 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri { m_Data.ValidateInputsOutputs("NeonStridedSliceWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::Coordinates starts; arm_compute::Coordinates ends; diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp index f4b4707633..ccc2bfe58b 100644 --- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp +++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp @@ -7,6 +7,7 @@ #include "NeonWorkloadUtils.hpp" #include +#include #include #include @@ -34,9 +35,9 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescripto { m_Data.ValidateInputsOutputs("NeonSubtractionWorkload", 2, 1); - arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>(); layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE); diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp index ffca2076fe..985f540e6a 100644 --- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp @@ -9,6 +9,7 @@ #include #include +#include #include @@ -60,8 +61,8 @@
NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload( { m_Data.ValidateInputsOutputs("NeonTransposeConvolution2dWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index b83d205970..29bfbc0ee2 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -5,6 +5,7 @@ #include +#include #include #include #include @@ -16,8 +17,8 @@ template <typename Workload> void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo) { auto queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); } @@ -29,9 +30,9 @@ void CheckInputsOutput(std::unique_ptr<Workload> workload, const TensorInfo& outputInfo) { auto queueDescriptor = workload->GetData(); - auto inputHandle0 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); - auto inputHandle1 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]); - auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0)); BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1)); BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); @@ -538,16 +539,16 @@ static void RefCreateSplitterWorkloadTest() // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData(); - auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]); BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType))); - auto outputHandle0 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType))); - auto outputHandle1 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]); + auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]); BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); - auto outputHandle2 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]); + auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]); BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); } @@ -910,7 +911,7 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape) // Check output is as expected auto queueDescriptor = workload->GetData(); - auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); } @@ -950,7 +951,7 @@ static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape, // Check output is as expected auto queueDescriptor = workload->GetData(); - auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType))); } @@ -1054,10 +1055,10 @@ static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape, StackQueueDescriptor queueDescriptor = workload->GetData(); for (unsigned int i = 0; i < numInputs; ++i) { - auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]); + auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]); BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType))); } - auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); + auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]); BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); } diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp index f1b31571db..a36ed45970 100644 --- a/src/backends/reference/workloads/RefWorkloadUtils.hpp +++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp @@ -9,12 +9,12 @@ #include #include +#include #include #include #include -#include namespace armnn { @@ -27,7 +27,7 @@ inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle) { // We know that reference workloads use RefTensorHandles for inputs and outputs const RefTensorHandle* refTensorHandle = - boost::polymorphic_downcast<const RefTensorHandle*>(tensorHandle); + PolymorphicDowncast<const RefTensorHandle*>(tensorHandle); return refTensorHandle->GetTensorInfo(); } diff --git a/src/profiling/test/ProfilingTests.hpp b/src/profiling/test/ProfilingTests.hpp index d1052cea97..b41f2dd58d 100644 --- a/src/profiling/test/ProfilingTests.hpp +++ b/src/profiling/test/ProfilingTests.hpp @@ -8,12 +8,12 @@ #include "ProfilingMocks.hpp" #include +#include #include #include #include -#include
#include #include @@ -229,7 +229,7 @@ public: MockProfilingConnection* GetMockProfilingConnection() { IProfilingConnection* profilingConnection = GetProfilingConnection(m_ProfilingService); - return boost::polymorphic_downcast<MockProfilingConnection*>(profilingConnection); + return PolymorphicDowncast<MockProfilingConnection*>(profilingConnection); } void ForceTransitionToState(ProfilingState newState) -- cgit v1.2.1
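
The call-site pattern this patch applies throughout can be illustrated in isolation. The sketch below is not code from the patch: BaseTensorHandle and ConcreteTensorHandle are hypothetical stand-ins for ITensorHandle and a concrete backend handle (IAclTensorHandle in the Neon workloads, RefTensorHandle in the reference backend), and it assumes the ArmNN headers are on the include path.

    #include <armnn/utility/PolymorphicDowncast.hpp>

    #include <iostream>
    #include <vector>

    // Stand-in for ITensorHandle: a polymorphic base class.
    struct BaseTensorHandle
    {
        virtual ~BaseTensorHandle() = default;
    };

    // Stand-in for a backend-specific handle such as IAclTensorHandle.
    struct ConcreteTensorHandle : BaseTensorHandle
    {
        int& GetPayload() { return m_Payload; }
        int  m_Payload = 0;
    };

    int main()
    {
        ConcreteTensorHandle handle;
        std::vector<BaseTensorHandle*> inputs { &handle }; // mirrors m_Data.m_Inputs

        // Before: boost::polymorphic_downcast<ConcreteTensorHandle*>(inputs[0])->...
        // After, as in every hunk of this patch:
        int& payload = armnn::PolymorphicDowncast<ConcreteTensorHandle*>(inputs[0])->GetPayload();
        payload = 42;

        std::cout << handle.m_Payload << std::endl; // prints 42
        return 0;
    }

Because the explicit destination type is kept at each call site, the conversion is mechanical: only the cast function and its include change, never the surrounding handle types.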