aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2021-02-15 18:23:17 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2021-02-15 18:23:17 +0000
commit3d2b4b2bff3be27f12a99e0e01284078870ee954 (patch)
tree33a9ea2a3267707088fd0a4a727d73a4568bb0a6
parent052fbe9c86628cfdc534c515d9b451aa8d3d1cb6 (diff)
downloadarmnn-3d2b4b2bff3be27f12a99e0e01284078870ee954.tar.gz
IVGCVSW-4873 Implement Pimpl Idiom for INetwork and IOptimizedNetwork
!android-nn-driver:5042
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: Ia1ce8b839e81b46428ba0f78463e085e5906958d
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
-rw-r--r--include/armnn/INetwork.hpp432
-rw-r--r--src/armnn/LoadedNetwork.cpp22
-rw-r--r--src/armnn/LoadedNetwork.hpp6
-rw-r--r--src/armnn/Network.cpp697
-rw-r--r--src/armnn/Network.hpp176
-rw-r--r--src/armnn/NetworkQuantizer.cpp6
-rw-r--r--src/armnn/OptimizedNetworkImpl.hpp30
-rw-r--r--src/armnn/Runtime.cpp2
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.cpp68
-rw-r--r--src/armnn/test/CreateWorkload.hpp16
-rw-r--r--src/armnn/test/NetworkTests.cpp29
-rw-r--r--src/armnn/test/OptimizerTests.cpp8
-rw-r--r--src/armnn/test/QuantizerTest.cpp123
-rw-r--r--src/armnn/test/RuntimeTests.cpp2
-rw-r--r--src/armnn/test/TestInputOutputLayerVisitor.cpp8
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp4
-rw-r--r--src/armnn/test/TestNameOnlyLayerVisitor.cpp4
-rw-r--r--src/armnn/test/TestUtils.cpp10
-rw-r--r--src/armnn/test/TestUtils.hpp3
-rw-r--r--src/armnn/test/optimizations/FuseActivationTests.cpp6
-rw-r--r--src/armnn/test/optimizations/FuseBatchNormTests.cpp4
-rw-r--r--src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp73
-rw-r--r--src/armnnTfLiteParser/test/DetectionPostProcess.cpp3
-rw-r--r--src/armnnTfParser/test/Assert.cpp6
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp1
-rw-r--r--src/backends/backendsCommon/test/OptimizationViewsTests.cpp4
-rw-r--r--src/backends/backendsCommon/test/OptimizedNetworkTests.cpp60
-rw-r--r--src/backends/cl/test/ClFallbackTests.cpp12
-rw-r--r--src/backends/cl/test/ClOptimizedNetworkTests.cpp18
-rw-r--r--src/backends/neon/test/NeonFallbackTests.cpp27
-rw-r--r--src/backends/neon/test/NeonOptimizedNetworkTests.cpp9
-rw-r--r--src/backends/neon/test/NeonTensorHandleTests.cpp8
-rw-r--r--src/backends/reference/test/RefOptimizedNetworkTests.cpp19
33 files changed, 1285 insertions, 611 deletions
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index c667d9ce8b..98c9f3f8da 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -106,7 +106,72 @@ protected:
~IConnectableLayer() {}
};
+
+struct OptimizerOptions
+{
+ OptimizerOptions()
+ : m_ReduceFp32ToFp16(false)
+ , m_Debug(false)
+ , m_ReduceFp32ToBf16(false)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(false)
+ , m_ModelOptions()
+ {}
+
+ OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+ ModelOptions modelOptions = {})
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ {
+ if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
+ {
+ throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
+ }
+ }
+
+ OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
+ ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
+ bool importEnabled = false, ModelOptions modelOptions = {})
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(shapeInferenceMethod)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ {
+ if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
+ {
+ throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
+ }
+ }
+
+ // Reduce Fp32 data to Fp16 for faster processing
+ bool m_ReduceFp32ToFp16;
+
+ // Add debug data for easier troubleshooting
+ bool m_Debug;
+
+ // Reduce Fp32 data to Bf16 for faster processing
+ bool m_ReduceFp32ToBf16;
+
+ // Infer output size when not available
+ ShapeInferenceMethod m_shapeInferenceMethod;
+
+ // Enable Import
+ bool m_ImportEnabled;
+
+ // Enable Model Options
+ ModelOptions m_ModelOptions;
+};
+
+class IWorkloadFactory;
+class NetworkImpl;
using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
+using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;
/// Main network class which provides the interface for building up a neural network.
/// This object is subsequently required by the IRuntime::Load() method.
@@ -117,28 +182,28 @@ public:
static INetworkPtr Create(NetworkOptions networkOptions = {});
static void Destroy(INetwork* network);
- virtual Status PrintGraph() = 0;
+ Status PrintGraph();
/// Adds an input layer to the network.
/// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified.
/// when passing the inputs to the IRuntime::EnqueueWorkload() function.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
+ IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
/// Adds an ArgMinMax layer to the network.
/// @param desc - Parameters for the L2 normalization operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+ const char* name = nullptr);
/// Add a Comparison layer to the network.
/// @param name - Optional name for the layer.
/// @param desc - Descriptor for the comparison operation.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+ const char* name = nullptr);
/// Adds a concatenation layer to the network.
/// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
@@ -147,8 +212,8 @@ public:
/// second input, etc....
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+ const char* name = nullptr);
/// Adds a 2D convolution layer to the network.
/// @param convolution2dDescriptor - Description of the 2D convolution layer.
@@ -156,28 +221,28 @@ public:
/// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
- virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
- virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr);
/// Adds a depth to space layer to the network.
/// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+ const char* name = nullptr);
/// Adds a 2D depthwise convolution layer to the network.
/// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
@@ -185,52 +250,52 @@ public:
/// @param biases Optional tensor for the bias data. If specified, must match the output tensor shape.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
+ IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
+ IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
+ IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
/// Adds a Dequantize layer to the network.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
/// Adds a Detection PostProcess layer to the network.
/// @param descriptor - Description of the Detection PostProcess layer.
/// @param anchors - Tensor for anchors.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddDetectionPostProcessLayer(
+ IConnectableLayer* AddDetectionPostProcessLayer(
const DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
/// Add an ElementwiseUnary layer to the network.
/// @param name - Optional name for the layer.
/// @param desc - Descriptor for the elementwiseUnary operation.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name = nullptr);
/// Add an Fill layer to the network.
/// @param name - Optional name for the layer.
/// @param fillDescriptor - Descriptor for the fill operation.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
+ const char* name = nullptr);
/// Adds a fully connected layer to the network.
/// @param fullyConnectedDescriptor - Description of the fully connected layer.
@@ -238,62 +303,62 @@ public:
/// @param biases - Optional tensor for the bias data.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
- virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
- virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name = nullptr);
/// Adds a permute layer to the network.
/// @param permuteDescriptor - PermuteDescriptor to configure the permute.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+ const char* name = nullptr);
/// Adds a batch to space ND layer to the network.
/// @param batchToSpaceNdDescriptor - Description of the layer.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name = nullptr);
/// Adds a pooling layer to the network.
/// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+ const char* name = nullptr);
/// Adds an activation layer to the network.
/// @param activationDescriptor - ActivationDescriptor to configure the activation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+ const char* name = nullptr);
/// Adds a normalization layer to the network.
/// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
+ const char* name = nullptr);
/// Adds a slice layer to the network.
/// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) = 0;
+ IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
/// Adds a softmax layer to the network.
/// If the data type is QAsymm8, then the output quantization parameters
@@ -301,8 +366,8 @@ public:
/// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+ const char* name = nullptr);
/// Adds a splitter layer to the network.
/// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
@@ -311,13 +376,13 @@ public:
/// the first output, second view to the second output, etc....
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+ const char* name = nullptr);
/// Adds a merge layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddMergeLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddMergeLayer(const char* name = nullptr);
/// Adds a concat layer to the network.
/// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
@@ -327,24 +392,24 @@ public:
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
- virtual IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+ const char* name = nullptr);
/// Add absolute layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- virtual IConnectableLayer* AddAbsLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddAbsLayer(const char* name = nullptr);
/// Adds an addition layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddAdditionLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
/// Adds a multiplication layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
/// Adds a batch normalization layer to the network.
/// @param mean - Pre-calculated mean for each channel.
@@ -353,61 +418,61 @@ public:
/// @param gamma - Per-channel multiplicative factor.
/// @return - Interface for configuring the layer.
/// @param name - Optional name for the layer.
- virtual IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+ IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
const ConstTensor& variance,
const ConstTensor& beta,
const ConstTensor& gamma,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
/// Adds a rank layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddRankLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddRankLayer(const char* name = nullptr);
/// Adds a resize bilinear layer to the network.
/// @param resizeDesc - Parameters for the resize operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
- virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
+ const char* name = nullptr);
/// Adds a resize layer to the network.
/// @param resizeDescriptor - Parameters for the resize operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr);
/// Adds a reduce layer to the network.
/// @param ReduceDescriptor - Parameters for the reduce operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+ const char* name = nullptr);
/// Adds an instance normalization layer to the network.
/// @param desc - Parameters for the instance normalization operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+ const char* name = nullptr);
/// Adds an L2 normalization layer to the network.
/// Normalization is performed along dimension 1, but requires a 4d input.
/// @param desc - Parameters for the L2 normalization operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+ const char* name = nullptr);
/// Adds a log softmax layer to the network.
/// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
+ const char* name = nullptr);
/// Adds a layer with no inputs and a single output, which always corresponds to
/// the passed in constant tensor.
@@ -416,71 +481,71 @@ public:
/// be freed or reused after this function is called.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddConstantLayer(const ConstTensor& input,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddConstantLayer(const ConstTensor& input,
+ const char* name = nullptr);
/// Adds a reshape layer to the network.
/// @param reshapeDescriptor - Parameters for the reshape operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+ const char* name = nullptr);
/// Adds a space to batch layer to the network.
/// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name = nullptr);
/// Adds a space to depth layer to the network.
/// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr);
/// Adds a floor layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddFloorLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddFloorLayer(const char* name = nullptr);
/// Adds an output layer to the network.
/// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
/// when passing the outputs to the IRuntime::EnqueueWorkload() function.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) = 0;
+ IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);
/// Add a Lstm layer to the network
/// @param descriptor - Parameters for the Lstm operation
/// @param params - Weights and biases for the LSTM cell
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name = nullptr);
/// Adds a division layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddDivisionLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
/// Adds a subtraction layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
/// Add a Maximum layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddMaximumLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
/// Add a Mean layer to the network.
/// @param meanDescriptor - Parameters for the mean operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) = 0;
+ IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
/// Adds a fully pad layer to the network.
/// @param paddings - n by 2 tensor, where n is the rank of the input tensor,
@@ -488,66 +553,66 @@ public:
/// paddings[i,1] indicates the amount of padding to add after the end of dimension i
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
+ const char* name = nullptr);
/// Add a quantize layer to the network
///@param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
/// Adds a strided slice layer to the network.
/// @param StridedSliceDescriptor - Parameters for the strided slice operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+ const char* name = nullptr);
/// Add a Minimum layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddMinimumLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
/// Add a Greater layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- virtual IConnectableLayer* AddGreaterLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
/// Add a Equal layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- virtual IConnectableLayer* AddEqualLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddEqualLayer(const char* name = nullptr);
/// Add Reciprocal of square root layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
/// Add Gather layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
ARMNN_DEPRECATED_MSG("Use AddGatherLayer with descriptor instead")
- virtual IConnectableLayer* AddGatherLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddGatherLayer(const char* name = nullptr);
/// Add Gather layer to the network.
/// @param descriptor - Description of the gather layer.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
+ const char* name = nullptr);
/// Adds a switch layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddSwitchLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
/// Adds a PReLU layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
+ IConnectableLayer* AddPreluLayer(const char* name = nullptr);
/// Adds a 2D transpose convolution layer to the network.
/// @param descriptor - Description of the 2D transpose convolution layer.
@@ -555,24 +620,24 @@ public:
/// @param biases - Optional tensor for the bias data.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+ IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
/// Adds a transpose layer to the network.
/// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+ const char* name = nullptr);
/// Adds a stack layer to the network.
/// @param descriptor - Description of the stack layer.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
+ const char* name = nullptr);
/// Add a stand-in layer for a type unknown to the Arm NN framework.
/// Note: Due to the nature of this layer, no validation can be performed by the framework.
@@ -580,115 +645,90 @@ public:
/// tensor sizes cannot be inferred.
/// @descriptor - Descriptor for the StandIn layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr);
/// Add a QuantizedLstm layer to the network
/// @param params - The weights and biases for the Quantized LSTM cell
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+ const char* name = nullptr);
/// Add a QLstm layer to the network
/// @param descriptor - Parameters for the QLstm operation
/// @param params - Weights and biases for the layer
/// @param name - Optional name for the layer
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
+ IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
const LstmInputParams& params,
- const char* name = nullptr) = 0;
+ const char* name = nullptr);
/// Adds a Logical Binary layer to the network.
/// @param descriptor - Description of the Logical Binary layer.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- virtual IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
- const char* name = nullptr) = 0;
+ IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
+ const char* name = nullptr);
- virtual void Accept(ILayerVisitor& visitor) const = 0;
+ void Accept(ILayerVisitor& visitor) const;
- virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
+ void ExecuteStrategy(IStrategy& strategy) const;
protected:
- ~INetwork() {}
-};
+ ~INetwork();
-using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;
+ friend class NetworkQuantizer;
+ friend void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy);
+ friend class TestConnectionPreservation;
+ friend TensorInfo GetInputTensorInfo(const INetwork* network);
+ friend IOptimizedNetworkPtr Optimize(const INetwork& network,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptions& options,
+ Optional<std::vector<std::string>&> messages);
-class IOptimizedNetwork
-{
-public:
- static void Destroy(IOptimizedNetwork* network);
+ INetwork(NetworkOptions networkOptions = {});
- virtual Status PrintGraph() = 0;
- virtual Status SerializeToDot(std::ostream& stream) const = 0;
-
- virtual profiling::ProfilingGuid GetGuid() const = 0;
-
-protected:
- ~IOptimizedNetwork() {}
+ std::unique_ptr<NetworkImpl> pNetworkImpl;
};
-struct OptimizerOptions
+struct BackendSettings;
+struct OptimizationResult;
+class OptimizedNetworkImpl;
+class IOptimizedNetwork
{
- OptimizerOptions()
- : m_ReduceFp32ToFp16(false)
- , m_Debug(false)
- , m_ReduceFp32ToBf16(false)
- , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
- , m_ImportEnabled(false)
- , m_ModelOptions()
- {}
-
- OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
- ModelOptions modelOptions = {})
- : m_ReduceFp32ToFp16(reduceFp32ToFp16)
- , m_Debug(debug)
- , m_ReduceFp32ToBf16(reduceFp32ToBf16)
- , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
- , m_ImportEnabled(importEnabled)
- , m_ModelOptions(modelOptions)
- {
- if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
- {
- throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
- }
- }
-
- OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
- ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
- bool importEnabled = false, ModelOptions modelOptions = {})
- : m_ReduceFp32ToFp16(reduceFp32ToFp16)
- , m_Debug(debug)
- , m_ReduceFp32ToBf16(reduceFp32ToBf16)
- , m_shapeInferenceMethod(shapeInferenceMethod)
- , m_ImportEnabled(importEnabled)
- , m_ModelOptions(modelOptions)
- {
- if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
- {
- throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
- }
- }
-
- // Reduce Fp32 data to Fp16 for faster processing
- bool m_ReduceFp32ToFp16;
-
- // Add debug data for easier troubleshooting
- bool m_Debug;
+public:
+ static void Destroy(IOptimizedNetwork* network);
- // Reduce Fp32 data to Bf16 for faster processing
- bool m_ReduceFp32ToBf16;
+ Status PrintGraph();
+ Status SerializeToDot(std::ostream& stream) const;
- // Infer output size when not available
- ShapeInferenceMethod m_shapeInferenceMethod;
+ profiling::ProfilingGuid GetGuid() const;
- // Enable Import
- bool m_ImportEnabled;
+ IOptimizedNetwork(std::unique_ptr<Graph> graph);
+ IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl);
+ ~IOptimizedNetwork();
- // Enable Model Options
- ModelOptions m_ModelOptions;
+protected:
+ friend class LoadedNetwork;
+ friend Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
+ friend ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
+ friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptions& options,
+ Optional<std::vector<std::string>&> messages);
+
+ template <typename PreCompiledWorkload, armnn::DataType dataType>
+ friend std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> CreatePreCompiledWorkloadTest(
+ armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ bool biasEnabled);
+
+ IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
+
+ std::unique_ptr<OptimizedNetworkImpl> pOptimizedNetworkImpl;
};
/// Create an optimized version of the network
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 4a307e2e04..ea09231c3c 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -80,7 +80,7 @@ void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils
} // anonymous
-std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
std::string& errorMessage,
const INetworkProperties& networkProperties,
profiling::ProfilingService& profilingService)
@@ -115,7 +115,7 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
return loadedNetwork;
}
-LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
const INetworkProperties& networkProperties,
profiling::ProfilingService& profilingService) :
m_OptimizedNetwork(std::move(net)),
@@ -128,7 +128,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
m_Profiler = std::make_shared<IProfiler>();
ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
- Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
+ Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
//First create tensor handlers, backends and workload factories.
//Handlers are created before workloads are.
//Because workload creation can modify some of the handlers,
@@ -146,7 +146,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
if (backend->SupportsTensorAllocatorAPI())
{
auto workloadFactory = backend->CreateWorkloadFactory(
- m_TensorHandleFactoryRegistry, m_OptimizedNetwork->GetModelOptions());
+ m_TensorHandleFactoryRegistry, m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions());
m_WorkloadFactories.emplace(
std::make_pair(backendId, std::make_pair(std::move(workloadFactory), nullptr)));
}
@@ -154,7 +154,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
{
IBackendInternal::IMemoryManagerSharedPtr memoryManager = backend->CreateMemoryManager();
auto workloadFactory = backend->CreateWorkloadFactory(
- memoryManager, m_OptimizedNetwork->GetModelOptions());
+ memoryManager, m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions());
m_WorkloadFactories.emplace(
std::make_pair(backendId, std::make_pair(std::move(workloadFactory), memoryManager)));
@@ -267,7 +267,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
}
// Set up memory.
- m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
+ m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().AllocateDynamicBuffers();
// Now that the intermediate tensor memory has been set-up, do any post allocation configuration for each workload.
for (auto& workload : m_WorkloadQueue)
@@ -278,7 +278,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
void LoadedNetwork::SendNetworkStructure()
{
- Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
+ Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
@@ -320,7 +320,7 @@ profiling::ProfilingGuid LoadedNetwork::GetNetworkGuid()
TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
{
- for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
+ for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
{
ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
if (inputLayer->GetBindingId() == layerId)
@@ -334,7 +334,7 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
{
- for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
+ for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
{
ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
@@ -368,7 +368,7 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer,
{},
reasonIfUnsupported,
- m_OptimizedNetwork->GetModelOptions()),
+ m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions()),
"Factory does not support layer");
IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
@@ -470,7 +470,7 @@ private:
Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors,
const OutputTensors& outputTensors)
{
- const Graph& graph = m_OptimizedNetwork->GetGraph();
+ const Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
// Walk graph to determine the order of execution.
if (graph.GetNumLayers() < 2)
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 3a44206683..c7dd37fdea 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -42,7 +42,7 @@ public:
Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
- static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+ static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
std::string & errorMessage,
const INetworkProperties& networkProperties,
profiling::ProfilingService& profilingService);
@@ -63,7 +63,7 @@ public:
private:
void AllocateWorkingMemory(std::lock_guard<std::mutex>& lock);
- LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+ LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
const INetworkProperties& networkProperties,
profiling::ProfilingService& profilingService);
@@ -87,7 +87,7 @@ private:
BackendPtrMap m_Backends;
WorkloadFactoryMap m_WorkloadFactories;
- std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
+ std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
WorkloadQueue m_InputQueue;
WorkloadQueue m_WorkloadQueue;
WorkloadQueue m_OutputQueue;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index bf7a056f6e..9373a6ac15 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -38,9 +38,473 @@
namespace armnn
{
+INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl(networkOptions)) {}
+
+INetwork::~INetwork() = default;
+
+Status INetwork::PrintGraph()
+{
+ return pNetworkImpl->PrintGraph();
+}
+
+IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
+{
+ return pNetworkImpl->AddInputLayer(id, name);
+}
+
+
+IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+ const char* name)
+{
+ return pNetworkImpl->AddArgMinMaxLayer(desc, name);
+}
+
+
+IConnectableLayer* INetwork::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name)
+{
+ Optional<ConstTensor> biases;
+ return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name )
+{
+
+ return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
+ weights,
+ armnn::Optional<ConstTensor>(biases),
+ name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const char* name)
+{
+ Optional<ConstTensor> biases;
+ return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
+ armnn::Optional<ConstTensor>(biases), name);
+}
+
+
+IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
+{
+ return pNetworkImpl->AddDequantizeLayer(name);
+}
+
+
+IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
+ const DetectionPostProcessDescriptor& descriptor,
+ const ConstTensor& anchors,
+ const char* name)
+{
+ return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
+}
+
+
+IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddFillLayer(fillDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const char* name)
+{
+ Optional<ConstTensor> biases;
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const ConstTensor& weights,
+ const ConstTensor& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
+ armnn::Optional<ConstTensor>(biases), name);
+}
+
+IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
+{
+ return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
+}
+IConnectableLayer* INetwork::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddMergeLayer(const char* name)
+{
+ return pNetworkImpl->AddMergeLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddAbsLayer(const char* name)
+{
+ return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
+}
+
+IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
+{
+ return pNetworkImpl->AddAdditionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
+{
+ return pNetworkImpl->AddMultiplicationLayer(name);
+}
+
+IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name)
+{
+ return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
+}
+
+IConnectableLayer* INetwork::AddRankLayer(const char* name)
+{
+ return pNetworkImpl->AddRankLayer(name);
+}
+
+IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+ const char* name)
+{
+ ResizeDescriptor resizeDescriptor;
+ resizeDescriptor.m_Method = ResizeMethod::Bilinear;
+ resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
+ resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
+ resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
+ resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
+ resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
+
+ return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+ const char* name)
+{
+ return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
+}
+
+IConnectableLayer* INetwork::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+ const char* name)
+{
+ return pNetworkImpl->AddL2NormalizationLayer(desc, name);
+}
+
+IConnectableLayer* INetwork::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddConstantLayer(const ConstTensor& input,
+ const char* name)
+{
+ return pNetworkImpl->AddConstantLayer(input, name);
+}
+
+IConnectableLayer* INetwork::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddFloorLayer(const char* name)
+{
+ return pNetworkImpl->AddFloorLayer(name);
+}
+IConnectableLayer* INetwork::AddOutputLayer(LayerBindingId id, const char* name)
+{
+ return pNetworkImpl->AddOutputLayer(id, name);
+}
+
+IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name)
+{
+ return pNetworkImpl->AddLstmLayer(descriptor, params, name);
+}
+
+IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
+{
+ return pNetworkImpl->AddDivisionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
+{
+ return pNetworkImpl->AddSubtractionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
+{
+ return pNetworkImpl->AddMaximumLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+ return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddPadLayer(const PadDescriptor& padDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddPadLayer(padDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddQuantizeLayer(const char* name)
+{
+ return pNetworkImpl->AddQuantizeLayer(name);
+}
+
+IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
+{
+ return pNetworkImpl->AddMinimumLayer(name);
+}
+
+IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
+{
+ return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
+}
+
+IConnectableLayer* INetwork::AddEqualLayer(const char* name)
+{
+ return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
+}
+
+IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
+{
+ return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
+}
+
+IConnectableLayer* INetwork::AddGatherLayer(const char* name)
+{
+ GatherDescriptor gatherDescriptor{};
+ return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddGatherLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSwitchLayer(const char* name)
+{
+ return pNetworkImpl->AddSwitchLayer(name);
+}
+
+IConnectableLayer* INetwork::AddPreluLayer(const char* name)
+{
+ return pNetworkImpl->AddPreluLayer(name);
+}
+
+IConnectableLayer* INetwork::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddStackLayer(const StackDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddStackLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddStandInLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+ const char* name)
+{
+ return pNetworkImpl->AddQuantizedLstmLayer(params, name);
+}
+
+IConnectableLayer* INetwork::AddQLstmLayer(const QLstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name)
+{
+ return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
+}
+
+IConnectableLayer* INetwork::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
+}
+
+void INetwork::Accept(ILayerVisitor& visitor) const
+{
+ return pNetworkImpl->Accept(visitor);
+}
+
+void INetwork::ExecuteStrategy(IStrategy& strategy) const
+{
+ return pNetworkImpl->ExecuteStrategy(strategy);
+}
+
armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
- return new Network(networkOptions);
+ return new INetwork(networkOptions);
}
armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
@@ -50,21 +514,48 @@ armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
void INetwork::Destroy(INetwork* network)
{
- delete PolymorphicDowncast<Network*>(network);
+ delete network;
}
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph)
+ : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph))) {}
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl)
+ : pOptimizedNetworkImpl(std::move(impl)) {}
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
+ : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph), modelOptions)) {}
+
+IOptimizedNetwork::~IOptimizedNetwork() = default;
+
void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
- delete PolymorphicDowncast<OptimizedNetwork*>(network);
+ delete network;
+}
+
+Status IOptimizedNetwork::PrintGraph()
+{
+ return pOptimizedNetworkImpl->PrintGraph();
+}
+
+Status IOptimizedNetwork::SerializeToDot(std::ostream& stream) const
+{
+ return pOptimizedNetworkImpl->SerializeToDot(stream);
+}
+
+profiling::ProfilingGuid IOptimizedNetwork::GetGuid() const
+{
+ return pOptimizedNetworkImpl->GetGuid();
}
-Status OptimizedNetwork::PrintGraph()
+Status OptimizedNetworkImpl::PrintGraph()
{
m_Graph->Print();
return Status::Success;
}
-Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
+Status OptimizedNetworkImpl::SerializeToDot(std::ostream& stream) const
{
return m_Graph->SerializeToDot(stream);
}
@@ -375,7 +866,7 @@ OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
}
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
Graph::Iterator& firstLayer,
Graph::Iterator& lastLayer,
@@ -501,7 +992,7 @@ OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
return result;
}
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
SubgraphView& subgraph,
Optional<std::vector<std::string>&> errMessages)
@@ -534,7 +1025,7 @@ BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRe
return backends;
}
-OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
+OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
BackendsMap& backends,
const ModelOptions& modelOptions,
@@ -1024,16 +1515,15 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
}
- const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
- std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());
+ std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());
- auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
+ auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
&IOptimizedNetwork::Destroy);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+ IOptimizedNetwork* optNetObjPtr = optNet.get();
// Get the optimized graph
- Graph& optGraph = optNetObjPtr->GetGraph();
+ Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
// Perform AddBroadcastReshapeLayer optimisation
using namespace optimizations;
@@ -1094,7 +1584,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
// Assign an available backend to each layer
Graph::Iterator firstLayer = optGraph.begin();
Graph::Iterator lastLayer = optGraph.end();
- OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
+ OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
backendSettings,
firstLayer,
lastLayer,
@@ -1109,7 +1599,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
OptimizeInverseConversionsFp32()));
// Apply the backend-specific optimizations
- OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
+ OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
backendSettings,
backends,
options.m_ModelOptions,
@@ -1159,13 +1649,13 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
if (!backendSpecificOptimizations.empty())
{
- Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
+ Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
}
}
return optNet;
}
-bool Network::GetShapeInferenceMethod()
+bool NetworkImpl::GetShapeInferenceMethod()
{
if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
{
@@ -1174,51 +1664,51 @@ bool Network::GetShapeInferenceMethod()
return false;
}
-Network::Network(NetworkOptions networkOptions)
+NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
: m_NetworkOptions(networkOptions),
m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
{}
-Network::~Network()
+NetworkImpl::~NetworkImpl()
{
}
-Status Network::PrintGraph()
+Status NetworkImpl::PrintGraph()
{
m_Graph->Print();
return Status::Success;
}
-IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
+IConnectableLayer* NetworkImpl::AddInputLayer(LayerBindingId id, const char* name)
{
return m_Graph->AddLayer<InputLayer>(id, name);
}
-IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+IConnectableLayer* NetworkImpl::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}
-IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
const char* name)
{
return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}
-IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name)
{
return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
}
-IConnectableLayer* Network::AddFillLayer(const FillDescriptor& fillDescriptor,
+IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescriptor,
const char* name)
{
return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
}
-IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
@@ -1240,7 +1730,7 @@ IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescr
return layer;
}
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
@@ -1248,7 +1738,7 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name)
{
@@ -1256,7 +1746,7 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
const char* name)
@@ -1265,16 +1755,16 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}
-IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}
-IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
+IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
{
if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
{
@@ -1293,7 +1783,7 @@ IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescrip
return layer;
}
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
@@ -1301,7 +1791,7 @@ IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor&
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name)
{
@@ -1309,7 +1799,7 @@ IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor&
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
const char* name)
@@ -1318,7 +1808,7 @@ IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor&
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
@@ -1341,13 +1831,13 @@ IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
return layer;
}
-IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
const char* name)
{
return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
}
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
@@ -1356,7 +1846,7 @@ IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name)
@@ -1365,7 +1855,7 @@ IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -1375,7 +1865,7 @@ IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}
-IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors, const char* name)
{
const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
@@ -1385,91 +1875,91 @@ IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionP
return layer;
}
-IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+IConnectableLayer* NetworkImpl::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
const char* name)
{
return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}
-IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}
-IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name)
{
return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}
-IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
+IConnectableLayer* NetworkImpl::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
const char* name)
{
return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
}
-IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
+IConnectableLayer* NetworkImpl::AddNormalizationLayer(const NormalizationDescriptor&
normalizationDescriptor,
const char* name)
{
return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}
-IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
{
return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
}
-IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+IConnectableLayer* NetworkImpl::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}
-IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
const char* name)
{
return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}
-IConnectableLayer* Network::AddMaximumLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
{
return m_Graph->AddLayer<MaximumLayer>(name);
}
-IConnectableLayer* Network::AddMinimumLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
{
return m_Graph->AddLayer<MinimumLayer>(name);
}
-IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
const char* name)
{
return AddConcatLayer(mergerDescriptor, name);
}
-IConnectableLayer* Network::AddAbsLayer(const char * name)
+IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
{
return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
}
-IConnectableLayer* Network::AddAdditionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
{
return m_Graph->AddLayer<AdditionLayer>(name);
}
-IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
{
return m_Graph->AddLayer<MultiplicationLayer>(name);
}
-IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
+IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
{
return m_Graph->AddLayer<OutputLayer>(id, name);
}
-IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
const ConstTensor& variance,
const ConstTensor& beta,
@@ -1486,19 +1976,19 @@ IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationD
return layer;
}
-IConnectableLayer* Network::AddRankLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddRankLayer(const char* name)
{
return m_Graph->AddLayer<RankLayer>(name);
}
-IConnectableLayer* Network::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
- const char* name)
+IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+ const char* name)
{
return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
}
-IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
- const char* name)
+IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+ const char* name)
{
ResizeDescriptor resizeDescriptor;
resizeDescriptor.m_Method = ResizeMethod::Bilinear;
@@ -1511,31 +2001,30 @@ IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescripto
return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}
-IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
-resizeDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}
-IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
- const char* name)
+IConnectableLayer* NetworkImpl::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+ const char* name)
{
return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
}
-IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
- const char* name)
+IConnectableLayer* NetworkImpl::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+ const char* name)
{
return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}
-IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
const char* name)
{
return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
}
-IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
+IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const char* name)
{
auto layer = m_Graph->AddLayer<ConstantLayer>(name);
@@ -1544,30 +2033,30 @@ IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const cha
return layer;
}
-IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+IConnectableLayer* NetworkImpl::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}
-IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+IConnectableLayer* NetworkImpl::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}
-IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+IConnectableLayer* NetworkImpl::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
const char* name)
{
return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
}
-IConnectableLayer* Network::AddFloorLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddFloorLayer(const char* name)
{
return m_Graph->AddLayer<FloorLayer>(name);
}
-IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name)
{
@@ -1708,85 +2197,85 @@ IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
return layer;
}
-IConnectableLayer* Network::AddDivisionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
{
return m_Graph->AddLayer<DivisionLayer>(name);
}
-IConnectableLayer* Network::AddSubtractionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
{
return m_Graph->AddLayer<SubtractionLayer>(name);
}
-IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
}
-IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
}
-IConnectableLayer *Network::AddQuantizeLayer(const char *name)
+IConnectableLayer *NetworkImpl::AddQuantizeLayer(const char *name)
{
return m_Graph->AddLayer<QuantizeLayer>(name);
}
-IConnectableLayer* Network::AddDequantizeLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddDequantizeLayer(const char* name)
{
return m_Graph->AddLayer<DequantizeLayer>(name);
}
-IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
-IConnectableLayer* Network::AddGreaterLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
{
return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
}
-IConnectableLayer* Network::AddEqualLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
{
return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
}
-IConnectableLayer* Network::AddRsqrtLayer(const char * name)
+IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
{
return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
}
-IConnectableLayer* Network::AddGatherLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
{
GatherDescriptor gatherDescriptor{};
return AddGatherLayer(gatherDescriptor, name);
}
-IConnectableLayer* Network::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
+IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name)
{
return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
}
-IConnectableLayer* Network::AddMergeLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMergeLayer(const char* name)
{
return m_Graph->AddLayer<MergeLayer>(name);
}
-IConnectableLayer* Network::AddSwitchLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddSwitchLayer(const char* name)
{
return m_Graph->AddLayer<SwitchLayer>(name);
}
-IConnectableLayer* Network::AddPreluLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddPreluLayer(const char* name)
{
return m_Graph->AddLayer<PreluLayer>(name);
}
-IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
const char* name)
@@ -1808,26 +2297,26 @@ IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvol
return layer;
}
-IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+IConnectableLayer* NetworkImpl::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
const char* name)
{
return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
}
-IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+IConnectableLayer* NetworkImpl::AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name)
{
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}
-IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddStandInLayer(const StandInDescriptor& desc,
const char* name)
{
return m_Graph->AddLayer<StandInLayer>(desc, name);
}
-IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name)
{
const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
@@ -1865,7 +2354,7 @@ IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams
return layer;
}
-IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name)
{
@@ -2007,13 +2496,13 @@ IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
return layer;
}
-IConnectableLayer* Network::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
+IConnectableLayer* NetworkImpl::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
const char* name)
{
return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
}
-void Network::Accept(ILayerVisitor& visitor) const
+void NetworkImpl::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
{
@@ -2021,7 +2510,7 @@ void Network::Accept(ILayerVisitor& visitor) const
};
}
-void Network::ExecuteStrategy(IStrategy& strategy) const
+void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
for (auto layer : GetGraph())
{
@@ -2029,17 +2518,17 @@ void Network::ExecuteStrategy(IStrategy& strategy) const
};
}
-OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
+OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
{
}
-OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
+OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
: m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
{
}
-OptimizedNetwork::~OptimizedNetwork()
+OptimizedNetworkImpl::~OptimizedNetworkImpl()
{
}
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index cffade5a21..8f16be1684 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -19,246 +19,249 @@
#include "Graph.hpp"
#include "Layer.hpp"
+#include "OptimizedNetworkImpl.hpp"
namespace armnn
{
class Graph;
+using NetworkImplPtr = std::unique_ptr<NetworkImpl, void(*)(NetworkImpl* network)>;
+
/// Private implementation of INetwork.
-class Network final : public INetwork
+class NetworkImpl
{
public:
- Network(NetworkOptions networkOptions = {});
- ~Network();
+ NetworkImpl(NetworkOptions networkOptions = {});
+ ~NetworkImpl();
const Graph& GetGraph() const { return *m_Graph; }
- Status PrintGraph() override;
+ Status PrintGraph();
- IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
+ IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr);
IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
IConnectableLayer* AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
IConnectableLayer* AddDetectionPostProcessLayer(
const DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("This AddGatherLayer overload is deprecated")
- IConnectableLayer* AddGatherLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddGatherLayer(const char* name = nullptr);
IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) override;
+ IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddAbsLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddAbsLayer(const char* name = nullptr);
- IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
- IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
const ConstTensor& mean,
const ConstTensor& variance,
const ConstTensor& beta,
const ConstTensor& gamma,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddRankLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddRankLayer(const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr) override;
+ IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr);
IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddFloorLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddFloorLayer(const char* name = nullptr);
- IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) override;
+ IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);
IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
const LstmInputParams& params,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddDivisionLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
- IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
- IConnectableLayer* AddMaximumLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
- IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+ IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
- IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr) override;
+ IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr);
- IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- IConnectableLayer* AddMinimumLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddGreaterLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddEqualLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddEqualLayer(const char* name = nullptr);
ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
- IConnectableLayer* AddMergeLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMergeLayer(const char* name = nullptr);
- IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
- IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddPreluLayer(const char* name = nullptr);
IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
const LstmInputParams& params,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
- const char* name = nullptr) override;
+ const char* name = nullptr);
IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
- const char* name = nullptr) override;
+ const char* name = nullptr);
- void Accept(ILayerVisitor& visitor) const override;
+ void Accept(ILayerVisitor& visitor) const;
- void ExecuteStrategy(IStrategy& strategy) const override;
+ void ExecuteStrategy(IStrategy& strategy) const;
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
@@ -284,29 +287,6 @@ private:
ModelOptions m_ModelOptions;
};
-class OptimizedNetwork final : public IOptimizedNetwork
-{
-public:
- OptimizedNetwork(std::unique_ptr<Graph> graph);
- OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
- ~OptimizedNetwork();
-
- Status PrintGraph() override;
- Status SerializeToDot(std::ostream& stream) const override;
-
- profiling::ProfilingGuid GetGuid() const final { return m_Guid; };
-
- Graph& GetGraph() { return *m_Graph; }
- ModelOptions& GetModelOptions() { return m_ModelOptions; }
-
-private:
- std::unique_ptr<Graph> m_Graph;
- profiling::ProfilingGuid m_Guid;
- ModelOptions m_ModelOptions;
-};
-
-
-
struct OptimizationResult
{
bool m_Warning;
@@ -338,7 +318,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
bool importEnabled,
Optional<std::vector<std::string>&> errMessages);
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
Graph::Iterator& firstLayer,
Graph::Iterator& lastLayer,
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index eed3f41bdc..06d8c5d0f2 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -50,7 +50,7 @@ void INetworkQuantizer::Destroy(INetworkQuantizer *quantizer)
void NetworkQuantizer::OverrideInputRange(LayerBindingId layerId, float min, float max)
{
- const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph();
+ const Graph& graph = m_InputNetwork->pNetworkImpl->GetGraph();
auto inputLayers = graph.GetInputLayers();
// Walk the input layers of the graph and override the quantization parameters of the one with the given id
@@ -69,7 +69,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors)
{
m_RefineCount = 0;
m_Ranges.SetDynamicMode(true);
- const Graph& cGraph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
+ const Graph& cGraph = m_InputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
// need to insert Debug layers in the DynamicQuantizationStrategy
Graph& graph = const_cast<Graph&>(cGraph);
@@ -136,7 +136,7 @@ void NetworkQuantizer::Refine(const InputTensors& inputTensors)
INetworkPtr NetworkQuantizer::ExportNetwork()
{
- const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
+ const Graph& graph = m_InputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
// Step 1) Walk the graph and populate default min/max values for
// intermediate tensors, only if Runtime does not exist (created
diff --git a/src/armnn/OptimizedNetworkImpl.hpp b/src/armnn/OptimizedNetworkImpl.hpp
new file mode 100644
index 0000000000..25bf9ca49c
--- /dev/null
+++ b/src/armnn/OptimizedNetworkImpl.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+namespace armnn
+{
+
+class OptimizedNetworkImpl
+{
+public:
+ OptimizedNetworkImpl(std::unique_ptr<Graph> graph);
+ OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
+ virtual ~OptimizedNetworkImpl();
+
+ virtual Status PrintGraph();
+ virtual Status SerializeToDot(std::ostream& stream) const;
+
+ virtual profiling::ProfilingGuid GetGuid() const { return m_Guid; };
+
+ Graph& GetGraph() { return *m_Graph; }
+ ModelOptions& GetModelOptions() { return m_ModelOptions; }
+
+private:
+ std::unique_ptr<Graph> m_Graph;
+ profiling::ProfilingGuid m_Guid;
+ ModelOptions m_ModelOptions;
+};
+
+}
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 8fdc4f1e0a..9cc7b2cb81 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -135,7 +135,7 @@ Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
}
unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
- std::unique_ptr<OptimizedNetwork>(PolymorphicDowncast<OptimizedNetwork*>(rawNetwork)),
+ std::unique_ptr<IOptimizedNetwork>(rawNetwork),
errorMessage,
networkProperties,
m_ProfilingService);
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index ab83a891a1..f3485c704b 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -282,7 +282,7 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
@@ -306,7 +306,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
layer->Accept(visitor);
@@ -335,7 +335,7 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
@@ -365,7 +365,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
@@ -388,7 +388,7 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
@@ -412,7 +412,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
weights,
@@ -444,7 +444,7 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
@@ -474,7 +474,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
@@ -491,7 +491,7 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional());
layer->Accept(visitor);
@@ -509,7 +509,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional(), layerName);
layer->Accept(visitor);
@@ -532,7 +532,7 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases);
layer->Accept(visitor);
@@ -556,7 +556,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases, layerName);
layer->Accept(visitor);
@@ -586,7 +586,7 @@ BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
layer->Accept(visitor);
@@ -617,7 +617,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
descriptor, mean, variance, beta, gamma, layerName);
@@ -632,7 +632,7 @@ BOOST_AUTO_TEST_CASE(CheckConstLayer)
TestConstantLayerVisitor visitor(input);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input);
layer->Accept(visitor);
@@ -647,7 +647,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
TestConstantLayerVisitor visitor(input, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
layer->Accept(visitor);
@@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -792,7 +792,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -883,7 +883,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -975,7 +975,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1062,7 +1062,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1176,7 +1176,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeepholeCifgDisabled)
TestLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1263,7 +1263,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1350,7 +1350,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1437,7 +1437,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
TestLstmLayerVisitor visitor(descriptor, params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1509,7 +1509,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerBasic)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1582,7 +1582,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedQLstmLayerBasic)
TestQLstmLayerVisitor visitor(descriptor, params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
layer->Accept(visitor);
@@ -1677,7 +1677,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabled)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1794,7 +1794,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledPeepholeEnabled)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1884,7 +1884,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgEnabledPeepholeEnabled)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -1974,7 +1974,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerProjectionEnabled)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -2097,7 +2097,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledLayerNormEnabled)
TestQLstmLayerVisitor visitor(descriptor, params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
layer->Accept(visitor);
@@ -2187,7 +2187,7 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
TestQuantizedLstmLayerVisitor visitor(params);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
layer->Accept(visitor);
@@ -2277,7 +2277,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
TestQuantizedLstmLayerVisitor visitor(params, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
layer->Accept(visitor);
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index c07bf6a5bc..3ea2c35061 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1974,11 +1974,11 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
{
IgnoreUnused(graph);
- // To create a PreCompiled layer, create a network and Optimize it.
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
// Add an input layer
- armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
+ armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
BOOST_TEST(inputLayer);
// ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
@@ -2021,7 +2021,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::ConstTensor biases(biasTensorInfo, biasData);
// Create convolution layer with biases
- convLayer = net.AddConvolution2dLayer(convDesc2d,
+ convLayer = net->AddConvolution2dLayer(convDesc2d,
weights,
Optional<ConstTensor>(biases),
convLayerName.c_str());
@@ -2029,7 +2029,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
else
{
// Create convolution layer without biases
- convLayer = net.AddConvolution2dLayer(convDesc2d,
+ convLayer = net->AddConvolution2dLayer(convDesc2d,
weights,
EmptyOptional(),
convLayerName.c_str());
@@ -2038,7 +2038,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
BOOST_TEST(convLayer);
// Add an output layer
- armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer");
+ armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
BOOST_TEST(outputLayer);
// set the tensors in the network (NHWC format)
@@ -2068,12 +2068,12 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
armnn::OptimizerOptions optimizerOptions;
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);
BOOST_CHECK(optimizedNet != nullptr);
// Find the PreCompiled layer in the optimised graph
- armnn::Graph& optimisedGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
Layer* preCompiledLayer = nullptr;
for (auto& layer : optimisedGraph)
{
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index ef270d94ee..692d64e4e0 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_SUITE(Network)
BOOST_AUTO_TEST_CASE(LayerGuids)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
@@ -43,23 +43,22 @@ BOOST_AUTO_TEST_CASE(LayerGuids)
BOOST_AUTO_TEST_CASE(NetworkBasic)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
BOOST_TEST(net.PrintGraph() == armnn::Status::Success);
}
BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
{
- armnn::Network net;
- armnn::INetwork& inet = net;
- inet.AddInputLayer(0);
- inet.AddAdditionLayer();
- inet.AddActivationLayer(armnn::ActivationDescriptor());
- inet.AddOutputLayer(0);
+ armnn::INetworkPtr inet(armnn::INetwork::Create());
+ inet->AddInputLayer(0);
+ inet->AddAdditionLayer();
+ inet->AddActivationLayer(armnn::ActivationDescriptor());
+ inet->AddOutputLayer(0);
}
BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
net.AddInputLayer(0);
net.AddAdditionLayer();
net.AddActivationLayer(armnn::ActivationDescriptor());
@@ -68,7 +67,7 @@ BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
BOOST_AUTO_TEST_CASE(NetworkModification)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
BOOST_TEST(inputLayer);
@@ -228,7 +227,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer");
@@ -285,7 +284,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
@@ -330,7 +329,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
{
- armnn::Network net;
+ armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
@@ -476,7 +475,7 @@ BOOST_AUTO_TEST_CASE(Network_AddMerge)
BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
{
// Create a simple network with a StandIn some place in it.
- armnn::Network net;
+ armnn::NetworkImpl net;
auto input = net.AddInputLayer(0);
// Add some valid layer.
@@ -509,7 +508,7 @@ BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
BOOST_AUTO_TEST_CASE(StandInLayerSingleInputMultipleOutputsNetworkTest)
{
// Another test with one input and two outputs on the StandIn layer.
- armnn::Network net;
+ armnn::NetworkImpl net;
// Create the input.
auto input = net.AddInputLayer(0);
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index e7eab9d00d..fa860abb64 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -756,12 +756,10 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
-
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+ OptimizedNetworkImpl optNet(std::move(graph));
// Get the optimized graph
- Graph& optGraph = optNetObjPtr->GetGraph();
+ Graph& optGraph = optNet.GetGraph();
std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
@@ -773,6 +771,8 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
// Assign an available backend to each layer
Graph::Iterator firstLayer = optGraph.begin();
Graph::Iterator lastLayer = optGraph.end();
+
+ OptimizedNetworkImpl* optNetObjPtr = &optNet;
OptimizationResult res = AssignBackends(optNetObjPtr,
backendSettings,
firstLayer,
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 67d0f95292..a932698674 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -33,6 +33,70 @@ const float g_SymmS8QuantizationBase = 127.0f;
const float g_SymmS16QuantizationBase = 32767.0f;
const float g_TestTolerance = 0.000001f;
+class TestConnectionPreservation : public LayerVisitorBase<VisitorNoThrowPolicy>
+{
+public:
+ TestConnectionPreservation(INetwork* network)
+ : LayerVisitorBase<VisitorNoThrowPolicy>()
+ , m_Network(network)
+ {}
+
+ void VisitAdditionLayer(const IConnectableLayer* layer, const char*) override
+ {
+ CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1");
+ CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2");
+ }
+
+ void CheckLayerName(LayerGuid guid, std::string expectedName)
+ {
+ auto graph = m_Network->pNetworkImpl->GetGraph();
+ bool guidFound = false;
+ for (Layer* layer : graph)
+ {
+ if (layer->GetGuid() == guid)
+ {
+ BOOST_CHECK_EQUAL(layer->GetName(), expectedName.c_str());
+ guidFound = true;
+ break;
+ }
+ }
+ if (!guidFound)
+ {
+ BOOST_FAIL("No layer matching the GUID was found");
+ }
+ }
+
+private:
+ INetwork* m_Network;
+};
+
+void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& visitor)
+{
+ auto graph = inputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
+
+ ApplyStrategyToLayers(graph, visitor);
+}
+
+TensorInfo GetInputTensorInfo(const INetwork* network)
+{
+ for (auto&& inputLayer : network->pNetworkImpl->GetGraph().GetInputLayers())
+ {
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ return inputLayer->GetOutputSlot(0).GetTensorInfo();
+ }
+ throw InvalidArgumentException("Network has no input layers");
+}
+
+TensorInfo GetInputTensorInfo(const NetworkImpl* network)
+{
+ for (auto&& inputLayer : network->GetGraph().GetInputLayers())
+ {
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ return inputLayer->GetOutputSlot(0).GetTensorInfo();
+ }
+ throw InvalidArgumentException("Network has no input layers");
+}
+
BOOST_AUTO_TEST_SUITE(Quantizer)
class TestQuantization : public IStrategy
@@ -473,14 +537,6 @@ private:
QuantizerOptions m_QuantizerOptions;
};
-void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy)
-{
- auto network = PolymorphicDowncast<const Network*>(inputNetwork);
- auto graph = network->GetGraph().TopologicalSort();
-
- ApplyStrategyToLayers(graph, strategy);
-}
-
void TestNetwork(INetwork* network, const TensorShape inShape, const TensorShape outShape)
{
const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
@@ -596,21 +652,11 @@ INetworkPtr CreateNetworkWithInputOutputLayers()
return network;
}
-TensorInfo GetInputTensorInfo(const Network* network)
-{
- for (auto&& inputLayer : network->GetGraph().GetInputLayers())
- {
- ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
- return inputLayer->GetOutputSlot(0).GetTensorInfo();
- }
- throw InvalidArgumentException("Network has no input layers");
-}
-
BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant)
{
INetworkPtr network = CreateNetworkWithInputOutputLayers();
- armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get()));
+ armnn::TensorInfo tensorInfo = GetInputTensorInfo(network.get());
// Outliers -56 and 98
std::vector<float> inputData({0, 0, 0, -56, 98, 0, 0, 0});
@@ -870,7 +916,7 @@ BOOST_AUTO_TEST_CASE(OverrideInputRangeEmptyNetwork)
RangeTracker ranges;
RangeTracker::MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
- Network network; // Empty network
+ NetworkImpl network; // Empty network
auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
@@ -884,7 +930,7 @@ BOOST_AUTO_TEST_CASE(OverrideInputRangeNoInputLayers)
RangeTracker ranges;
MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
- Network network;
+ NetworkImpl network;
network.AddAdditionLayer(); // Network with no input layers
auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
@@ -899,7 +945,7 @@ BOOST_AUTO_TEST_CASE(OverrideInputRangeInputLayers)
RangeTracker ranges;
MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
- Network network;
+ NetworkImpl network;
// Adding the layers
IConnectableLayer* input0 = network.AddInputLayer(0);
@@ -2117,16 +2163,25 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant)
Graph m_Graph;
};
- INetworkPtr network = INetwork::Create();
+ class TestNetwork : public INetwork
+ {
+ public :
+ NetworkImpl* GetPNetworkImpl()
+ {
+ return pNetworkImpl.get();
+ }
+ };
+
+ TestNetwork testNetwork;
- IConnectableLayer* inputLayer = network->AddInputLayer(0,"inputLayer1");
+ IConnectableLayer* inputLayer = testNetwork.AddInputLayer(0,"inputLayer1");
armnn::ActivationDescriptor ReLUDesc;
ReLUDesc.m_Function = ActivationFunction::ReLu;
- IConnectableLayer* reLULayer1 = network->AddActivationLayer(ReLUDesc, "reLU1");
- IConnectableLayer* reLULayer2 = network->AddActivationLayer(ReLUDesc, "reLU2");
- IConnectableLayer* addLayer1 = network->AddAdditionLayer("addLayer1");
- IConnectableLayer* outputLayer = network->AddOutputLayer(0,"outPutLayer1");
+ IConnectableLayer* reLULayer1 = testNetwork.AddActivationLayer(ReLUDesc, "reLU1");
+ IConnectableLayer* reLULayer2 = testNetwork.AddActivationLayer(ReLUDesc, "reLU2");
+ IConnectableLayer* addLayer1 = testNetwork.AddAdditionLayer("addLayer1");
+ IConnectableLayer* outputLayer = testNetwork.AddOutputLayer(0,"outPutLayer1");
inputLayer->GetOutputSlot(0).Connect(reLULayer1->GetInputSlot(0));
reLULayer1->GetOutputSlot(0).Connect(reLULayer2->GetInputSlot(0));
@@ -2139,12 +2194,12 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant)
reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
- TestConnectionPreservation strategy1(PolymorphicDowncast<const Network*>(network.get())->GetGraph());
- VisitLayersTopologically(network.get(), strategy1);
+ TestConnectionPreservation strategy1(testNetwork.GetPNetworkImpl()->GetGraph());
+ VisitLayersTopologically(&testNetwork, strategy1);
- armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network.get());
+ armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(&testNetwork);
- armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get()));
+ armnn::TensorInfo tensorInfo = GetInputTensorInfo(&testNetwork);
std::vector<float> inputData({0, 2, 0, 4});
armnn::ConstTensor inputTensor(tensorInfo, inputData.data());
@@ -2155,7 +2210,9 @@ BOOST_AUTO_TEST_CASE(TestConnectionPreservationAfterDynamicQuant)
INetworkPtr quantNetwork = quantizer->ExportNetwork();
- TestConnectionPreservation strategy2(PolymorphicDowncast<const Network*>(quantNetwork.get())->GetGraph());
+ TestNetwork* testQuantNetwork = static_cast<TestNetwork*>(quantNetwork.get());
+
+ TestConnectionPreservation strategy2(testQuantNetwork->GetPNetworkImpl()->GetGraph());
VisitLayersTopologically(quantNetwork.get(), strategy2);
}
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 1d5960b2a4..c5457d03f3 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>();
+ armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
mockNetwork1->AddInputLayer(0, "test layer");
// Warm-up load/unload pair to put the runtime in a stable state (memory-wise).
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 35ffc55e55..6563517da1 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -14,7 +14,7 @@ BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndName)
{
const char* layerName = "InputLayer";
TestInputLayerVisitor visitor(1, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1, layerName);
layer->Accept(visitor);
@@ -23,7 +23,7 @@ BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndName)
BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndNameNull)
{
TestInputLayerVisitor visitor(1);
- Network net;
+ NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1);
layer->Accept(visitor);
@@ -33,7 +33,7 @@ BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndName)
{
const char* layerName = "OutputLayer";
TestOutputLayerVisitor visitor(1, layerName);
- Network net;
+ NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1, layerName);
layer->Accept(visitor);
@@ -42,7 +42,7 @@ BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndName)
BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndNameNull)
{
TestOutputLayerVisitor visitor(1);
- Network net;
+ NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1);
layer->Accept(visitor);
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 7d4dcaae0e..39e254339f 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -16,7 +16,7 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameAndDescriptor) \
const char* layerName = "name##Layer"; \
armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
Test##name##LayerVisitor visitor(descriptor, layerName); \
- armnn::Network net; \
+ armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
layer->Accept(visitor); \
}
@@ -26,7 +26,7 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptrAndDescriptor) \
{ \
armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
Test##name##LayerVisitor visitor(descriptor); \
- armnn::Network net; \
+ armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
layer->Accept(visitor); \
}
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 994375d435..971d7eeab7 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -16,7 +16,7 @@ namespace
BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
{ \
Test##name##LayerVisitor visitor("name##Layer"); \
- armnn::Network net; \
+ armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
layer->Accept(visitor); \
}
@@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
{ \
Test##name##LayerVisitor visitor; \
- armnn::Network net; \
+ armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
layer->Accept(visitor); \
}
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
index 440d4e09f3..6020c7631c 100644
--- a/src/armnn/test/TestUtils.cpp
+++ b/src/armnn/test/TestUtils.cpp
@@ -22,6 +22,16 @@ void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const
namespace armnn
{
+Graph& GetGraphForTesting(IOptimizedNetwork* optNet)
+{
+ return optNet->pOptimizedNetworkImpl->GetGraph();
+}
+
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet)
+{
+ return optNet->pOptimizedNetworkImpl->GetModelOptions();
+}
+
profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime)
{
return runtime->m_ProfilingService;
diff --git a/src/armnn/test/TestUtils.hpp b/src/armnn/test/TestUtils.hpp
index bf222b3c56..fa9156bc09 100644
--- a/src/armnn/test/TestUtils.hpp
+++ b/src/armnn/test/TestUtils.hpp
@@ -51,7 +51,8 @@ bool CheckRelatedLayers(armnn::Graph& graph, const std::list<std::string>& testR
namespace armnn
{
-
+Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime);
} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index c8adea2132..71a554b567 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -345,7 +345,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Optimise ArmNN network
IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
- Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
+ Graph& graphFused = GetGraphForTesting(optNetFused.get());
auto checkFusedConv2d = [](const Layer* const layer)->bool {
return IsLayerOfType<LayerType>(layer) &&
@@ -386,7 +386,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Optimise ArmNN network
IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
- Graph graphNotFused = PolymorphicDowncast<OptimizedNetwork*>(optNetNotFused.get())->GetGraph();
+ Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
BOOST_CHECK(5 == graphNotFused.GetNumLayers());
BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
@@ -443,8 +443,6 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
// Optimise ArmNN network
IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
- Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
-
// Load network into runtime
NetworkId networkIdentifier;
BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index bf47c577a4..be66c5e4af 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -186,7 +186,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Optimise ArmNN network
IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
- Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
+ Graph& graphFused = GetGraphForTesting(optNetFused.get());
auto checkFusedConv2d = [ ](const armnn::Layer* const layer) -> bool
{
@@ -233,7 +233,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Optimise ArmNN network
IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
- Graph graphNotFused = PolymorphicDowncast<OptimizedNetwork*>(optNetNotFused.get())->GetGraph();
+ Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
BOOST_CHECK(5 == graphNotFused.GetNumLayers());
BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index 6bfd7e301f..b47e3c7296 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -50,6 +50,36 @@ INetworkPtr CreateTestNetwork()
}
/// Shared function for the below tests, so that we test the same network in both cases.
+std::unique_ptr<NetworkImpl> CreateTestNetworkImpl()
+{
+ std::unique_ptr<NetworkImpl> network(new NetworkImpl());
+
+ auto input = network->AddInputLayer(0, "input");
+ const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+ // Insert Permute which swaps batches and channels dimensions
+ auto permute = network->AddPermuteLayer(PermuteDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
+ const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
+ permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
+ input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+
+ // Insert BatchToSpace
+ BatchToSpaceNdDescriptor batchToSpaceDesc;
+ batchToSpaceDesc.m_BlockShape = { 2, 2 };
+ batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
+ auto batchToSpace = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
+ const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
+ batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
+ permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));
+
+ auto output = network->AddOutputLayer(0, "output");
+ batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ return network;
+}
+
+/// Shared function for the below tests, so that we test the same network in both cases.
INetworkPtr CreateTransposeTestNetwork()
{
// Create a network
@@ -80,14 +110,45 @@ INetworkPtr CreateTransposeTestNetwork()
return network;
}
+/// Shared function for the below tests, so that we test the same network in both cases.
+std::unique_ptr<NetworkImpl> CreateTransposeTestNetworkImpl()
+{
+ // Create a network
+ std::unique_ptr<NetworkImpl> network(new NetworkImpl());
+
+ auto input = network->AddInputLayer(0, "input");
+ const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+ // Insert Permute which swaps batches and channels dimensions
+ auto permute = network->AddTransposeLayer(TransposeDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
+ const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
+ permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
+ input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+
+ // Insert BatchToSpace
+ BatchToSpaceNdDescriptor batchToSpaceDesc;
+ batchToSpaceDesc.m_BlockShape = { 2, 2 };
+ batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
+ auto batchToSpace = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
+ const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
+ batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
+ permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));
+
+ auto output = network->AddOutputLayer(0, "output");
+ batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ return network;
+}
+
} // namespace
/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
{
- INetworkPtr network = CreateTestNetwork();
- Graph graph = static_cast<Network*>(network.get())->GetGraph();
+ std::unique_ptr<NetworkImpl> network = CreateTestNetworkImpl();
+ Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
@@ -116,8 +177,8 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
{
- INetworkPtr network = CreateTransposeTestNetwork();
- Graph graph = static_cast<Network*>(network.get())->GetGraph();
+ std::unique_ptr<NetworkImpl> network = CreateTransposeTestNetworkImpl();
+ Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
@@ -155,7 +216,7 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());
// Confirm that the optimization has actually taken place
- const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
+ const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
@@ -202,7 +263,7 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());
// Confirm that the optimization has actually taken place
- const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
+ const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index cb4173a620..304520c24f 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -241,8 +241,7 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo
auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
- auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
- auto graph = optimizedNetwork->GetGraph();
+ armnn::Graph& graph = GetGraphForTesting(optimized.get());
// Check the number of layers in the graph
BOOST_TEST((graph.GetNumInputs() == 2));
diff --git a/src/armnnTfParser/test/Assert.cpp b/src/armnnTfParser/test/Assert.cpp
index b978f0264d..0665be7c7e 100644
--- a/src/armnnTfParser/test/Assert.cpp
+++ b/src/armnnTfParser/test/Assert.cpp
@@ -102,8 +102,7 @@ BOOST_FIXTURE_TEST_CASE(AssertSimpleGraphStructureTest, AssertSimpleFixture)
{
auto optimized = SetupOptimizedNetwork({ { "Placeholder", { 1, 1, 1, 4 } } }, { "Add" });
- auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
- auto graph = optimizedNetwork->GetGraph();
+ armnn::Graph& graph = GetGraphForTesting(optimized.get());
BOOST_TEST((graph.GetNumInputs() == 1));
BOOST_TEST((graph.GetNumOutputs() == 1));
@@ -258,8 +257,7 @@ BOOST_FIXTURE_TEST_CASE(AssertGraphStructureTest, AssertFixture)
{ "Input1", { 1, 1, 2, 2 } } },
{ "Output" });
- auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
- auto graph = optimizedNetwork->GetGraph();
+ armnn::Graph& graph = GetGraphForTesting(optimized.get());
BOOST_TEST((graph.GetNumInputs() == 2));
BOOST_TEST((graph.GetNumOutputs() == 1));
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6ab6d2c8ac..13fd190ea2 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -7,6 +7,7 @@
#include <armnn/TensorFwd.hpp>
#include <armnn/Optional.hpp>
#include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/INetwork.hpp>
#include <backendsCommon/Workload.hpp>
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index c972b4b15f..b472a0321d 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -212,8 +212,8 @@ BOOST_AUTO_TEST_CASE(OptimizeViewsValidateDeviceMockBackend)
BOOST_CHECK(optNet);
// Check the optimised graph
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- CheckLayers(optNetObjPtr->GetGraph());
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ CheckLayers(graph);
}
BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 721dfb004c..66d166fc08 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -15,12 +15,13 @@ BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
BOOST_AUTO_TEST_CASE(SerializeToDot)
{
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
//Defines layers.
- auto input = net.AddInputLayer(0);
- auto add = net.AddAdditionLayer();
- auto output = net.AddOutputLayer(0);
+ auto input = net->AddInputLayer(0);
+ auto add = net->AddAdditionLayer();
+ auto output = net->AddOutputLayer(0);
// Connects layers.
input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(SerializeToDot)
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
std::ostringstream ss;
optimizedNet->SerializeToDot(ss);
@@ -127,7 +128,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_REQUIRE(optNet);
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
// If NEON is enabled, Input and Output layers are supported by CpuAcc,
// the other layers are supported by CpuRef.
@@ -151,7 +155,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
{
const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
armnn::NormalizationDescriptor nmDesc;
armnn::ActivationDescriptor acDesc;
@@ -167,21 +172,21 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
// sm
// |
// ot
- armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+ armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
layer->GetOutputSlot(0).SetTensorInfo(desc);
- armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+ armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
normLayer->GetOutputSlot(0).SetTensorInfo(desc);
- layer = net.AddActivationLayer(acDesc, "ac");
+ layer = net->AddActivationLayer(acDesc, "ac");
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
armnn::IConnectableLayer* prevLayer = layer;
- layer = net.AddMultiplicationLayer("ml");
+ layer = net->AddMultiplicationLayer("ml");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -189,13 +194,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
prevLayer = layer;
armnn::SoftmaxDescriptor softmaxDescriptor;
- layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+ layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
prevLayer = layer;
- layer = net.AddOutputLayer(0, "ot");
+ layer = net->AddOutputLayer(0, "ot");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -207,7 +212,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
try
{
- Optimize(net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
BOOST_FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException& e)
@@ -221,7 +226,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
{
const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
armnn::NormalizationDescriptor nmDesc;
armnn::ActivationDescriptor acDesc;
@@ -237,21 +243,21 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
// sm
// |
// ot
- armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+ armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
layer->GetOutputSlot(0).SetTensorInfo(desc);
- armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+ armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
normLayer->GetOutputSlot(0).SetTensorInfo(desc);
- layer = net.AddActivationLayer(acDesc, "ac");
+ layer = net->AddActivationLayer(acDesc, "ac");
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
armnn::IConnectableLayer* prevLayer = layer;
- layer = net.AddMultiplicationLayer("ml");
+ layer = net->AddMultiplicationLayer("ml");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -259,13 +265,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
prevLayer = layer;
armnn::SoftmaxDescriptor softmaxDescriptor;
- layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+ layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
prevLayer = layer;
- layer = net.AddOutputLayer(0, "ot");
+ layer = net->AddOutputLayer(0, "ot");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -274,12 +280,15 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
- armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_CHECK(optNet);
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
// validate workloads
armnn::RefWorkloadFactory fact;
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
BOOST_CHECK_NO_THROW(
@@ -316,7 +325,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_REQUIRE(optNet);
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
// If NEON is enabled, Input and Output layers are supported by CpuAcc,
// the other layers are supported by CpuRef.
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 5885cbe8ef..4384ae5fec 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -51,8 +51,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -177,8 +176,7 @@ BOOST_AUTO_TEST_CASE(ClImportDisabledFallbackToNeon)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -307,8 +305,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -445,8 +442,7 @@ BOOST_AUTO_TEST_CASE(ClImportDisableFallbackSubgraphToNeon)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index dddc5aa8bc..a41c5f87e9 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -39,7 +39,9 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
// validate workloads
armnn::ClWorkloadFactory fact =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+
+ const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
+ for (auto&& layer : theGraph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
BOOST_CHECK_NO_THROW(
@@ -59,17 +61,17 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
// if there are inverse conversion layers remove them with optimization
// at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
// and inverse conversion layers are removed by the optimizer
- armnn::Network net;
+ armnn::INetworkPtr net(armnn::INetwork::Create());
// Defines layers.
- auto input = net.AddInputLayer(0, "input layer");
+ auto input = net->AddInputLayer(0, "input layer");
// ReLu1
armnn::ActivationDescriptor activation1Descriptor;
activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
activation1Descriptor.m_A = 1.f;
activation1Descriptor.m_B = -1.f;
- auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
- auto output = net.AddOutputLayer(0, "output layer");
+ auto activation = net->AddActivationLayer(activation1Descriptor, "activation layer");
+ auto output = net->AddOutputLayer(0, "output layer");
// Connects layers.
input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
@@ -89,9 +91,9 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
optimizerOptions.m_ReduceFp32ToFp16 = true;
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
- net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+ *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
- const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
// Tests that all layers are present in the graph.
BOOST_TEST(graph.GetNumLayers() == 5);
@@ -127,7 +129,7 @@ BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
BOOST_CHECK(optimizedNet);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index fd7fbbc4d5..2d70cc2b1b 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -62,8 +62,7 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -200,8 +199,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -331,8 +329,7 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -469,8 +466,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -598,8 +594,7 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -723,8 +718,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -849,8 +843,7 @@ BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -979,8 +972,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -1121,8 +1113,7 @@ BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 85f06174c7..4944c31d71 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -35,7 +35,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
armnn::NeonWorkloadFactory fact =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
BOOST_CHECK_NO_THROW(
@@ -103,7 +104,7 @@ BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
BOOST_CHECK(optimizedNet);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
@@ -134,8 +135,10 @@ BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
BOOST_CHECK(optimizedNet);
+ std::unique_ptr<armnn::Graph> graphPtr;
+ armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = impl.GetModelOptions();
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index e6d740280d..0e24e9505b 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -128,7 +128,7 @@ BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -211,7 +211,7 @@ BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -380,7 +380,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 16ff202f70..086c1e471a 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -71,12 +71,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
BOOST_CHECK(optNet);
// Validates workloads.
armnn::RefWorkloadFactory fact;
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ for (auto&& layer : graph)
{
BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
}
@@ -109,7 +110,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
// optimize the network
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
@@ -141,8 +145,9 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
// optimize the network
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
@@ -183,7 +188,9 @@ BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);
- const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
+ graph.AllocateDynamicBuffers();
+
// Tests that all layers are present in the graph.
BOOST_TEST(graph.GetNumLayers() == 5);