From ef33cb192eef332fb3a26be742b341288421e5bc Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Fri, 29 Jan 2021 14:24:57 +0000
Subject: IVGCVSW-5592 Implement Pimpl Idiom for Caffe and Onnx Parsers

Signed-off-by: Kevin May
Change-Id: I760dc4f33c0f87113cda2fa924da70f2e8c19025
---
 include/armnnCaffeParser/ICaffeParser.hpp          |  27 ++-
 include/armnnOnnxParser/IOnnxParser.hpp            |  18 +-
 src/armnnCaffeParser/CaffeParser.cpp               | 193 +++++++++++--------
 src/armnnCaffeParser/CaffeParser.hpp               |  27 ++-
 src/armnnCaffeParser/RecordByRecordCaffeParser.cpp |   2 +-
 src/armnnCaffeParser/RecordByRecordCaffeParser.hpp |   2 +-
 src/armnnOnnxParser/OnnxParser.cpp                 | 208 ++++++++++++---------
 src/armnnOnnxParser/OnnxParser.hpp                 |  17 +-
 src/armnnOnnxParser/test/GetInputsOutputs.cpp      |  20 +-
 9 files changed, 301 insertions(+), 213 deletions(-)

diff --git a/include/armnnCaffeParser/ICaffeParser.hpp b/include/armnnCaffeParser/ICaffeParser.hpp
index a1ba59fbc6..0e31ad4461 100644
--- a/include/armnnCaffeParser/ICaffeParser.hpp
+++ b/include/armnnCaffeParser/ICaffeParser.hpp
@@ -29,31 +29,38 @@ public:
     static void Destroy(ICaffeParser* parser);
 
     /// Create the network from a protobuf text file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(
+    armnn::INetworkPtr CreateNetworkFromTextFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) = 0;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Create the network from a protobuf binary file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) = 0;
 
+        const std::vector<std::string>& requestedOutputs);
 
     /// Create the network directly from protobuf text in a string. Useful for debugging/testin.g
-    virtual armnn::INetworkPtr CreateNetworkFromString(
+    armnn::INetworkPtr CreateNetworkFromString(
        const char* protoText,
        const std::map<std::string, armnn::TensorShape>& inputShapes,
-       const std::vector<std::string>& requestedOutputs) = 0;
+       const std::vector<std::string>& requestedOutputs);
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
-protected:
-    virtual ~ICaffeParser() {};
+private:
+    friend class CaffeParser;
+    friend class RecordByRecordCaffeParser;
+
+    ICaffeParser();
+    ~ICaffeParser();
+
+    class CaffeParserImpl;
+    std::unique_ptr<CaffeParserImpl> pCaffeParserImpl;
 };
 
 }
\ No newline at end of file
diff --git a/include/armnnOnnxParser/IOnnxParser.hpp b/include/armnnOnnxParser/IOnnxParser.hpp
index d6b9399dd5..f9d692738d 100644
--- a/include/armnnOnnxParser/IOnnxParser.hpp
+++ b/include/armnnOnnxParser/IOnnxParser.hpp
@@ -16,6 +16,7 @@ namespace armnnOnnxParser
 
 using BindingPointInfo = armnn::BindingPointInfo;
 
+class OnnxParserImpl;
 class IOnnxParser;
 using IOnnxParserPtr = std::unique_ptr<IOnnxParser, void(*)(IOnnxParser* parser)>;
 
@@ -27,22 +28,25 @@ public:
     static void Destroy(IOnnxParser* parser);
 
     /// Create the network from a protobuf binary file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) = 0;
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile);
 
     /// Create the network from a protobuf text file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile) = 0;
+    armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile);
 
     /// Create the network directly from protobuf text in a string. Useful for debugging/testing
-    virtual armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText) = 0;
+    armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText);
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
-    protected:
-    virtual ~IOnnxParser() {};
+private:
+    IOnnxParser();
+    ~IOnnxParser();
+
+    std::unique_ptr<OnnxParserImpl> pOnnxParserImpl;
 };
 
 }
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 3ab473c2b4..dfb9cec206 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -60,6 +60,59 @@ using namespace caffe;
 using namespace std;
 using namespace google::protobuf::io;
 
+ICaffeParser::ICaffeParser() : pCaffeParserImpl(new RecordByRecordCaffeParser()) {}
+
+ICaffeParser::~ICaffeParser() = default;
+
+ICaffeParser* ICaffeParser::CreateRaw()
+{
+    return new ICaffeParser();
+}
+
+ICaffeParserPtr ICaffeParser::Create()
+{
+    return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy);
+}
+
+void ICaffeParser::Destroy(ICaffeParser* parser)
+{
+    delete parser;
+}
+
+armnn::INetworkPtr ICaffeParser::CreateNetworkFromTextFile(
+    const char* graphFile,
+    const std::map<std::string, armnn::TensorShape>& inputShapes,
+    const std::vector<std::string>& requestedOutputs)
+{
+    return pCaffeParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
+}
+
+armnn::INetworkPtr ICaffeParser::CreateNetworkFromBinaryFile(
+    const char* graphFile,
+    const std::map<std::string, armnn::TensorShape>& inputShapes,
+    const std::vector<std::string>& requestedOutputs)
+{
+    return pCaffeParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes,requestedOutputs);
+}
+
+armnn::INetworkPtr ICaffeParser::CreateNetworkFromString(
+    const char* protoText,
+    const std::map<std::string, armnn::TensorShape>& inputShapes,
+    const std::vector<std::string>& requestedOutputs)
+{
+    return pCaffeParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
+}
+
+BindingPointInfo ICaffeParser::GetNetworkInputBindingInfo(const std::string& name) const
+{
+    return pCaffeParserImpl->GetNetworkInputBindingInfo(name);
+}
+
+BindingPointInfo ICaffeParser::GetNetworkOutputBindingInfo(const std::string& name) const
+{
+    return pCaffeParserImpl->GetNetworkOutputBindingInfo(name);
+}
+
 namespace
 {
 
@@ -232,63 +285,49 @@ ValueType GetOptionalWithFallback(const ParamType& param,
 
 } // namespace
 
-const std::map<std::string, CaffeParserBase::OperationParsingFunction>
-    CaffeParserBase::ms_CaffeLayerNameToParsingFunctions = {
-    { "Input",        &CaffeParserBase::ParseInputLayer },
-    { "Convolution",  &CaffeParserBase::ParseConvLayer },
-    { "Deconvolution",&CaffeParserBase::ParseDeconvLayer },
-    { "Pooling",      &CaffeParserBase::ParsePoolingLayer },
-    { "ReLU",         &CaffeParserBase::ParseReluLayer },
-    { "LRN",          &CaffeParserBase::ParseLRNLayer },
-    { "InnerProduct", &CaffeParserBase::ParseInnerProductLayer },
-    { "Softmax",      &CaffeParserBase::ParseSoftmaxLayer },
-    { "Eltwise",      &CaffeParserBase::ParseEltwiseLayer },
-    { "Concat",       &CaffeParserBase::ParseConcatLayer },
-    { "BatchNorm",    &CaffeParserBase::ParseBatchNormLayer },
-    { "Scale",        &CaffeParserBase::ParseScaleLayer },
-    { "Split",        &CaffeParserBase::ParseSplitLayer },
-    { "Dropout",      &CaffeParserBase::ParseDropoutLayer},
-    { "ArgMax",       &CaffeParserBase::ParseArgmaxLayer},
+const std::map<std::string, ICaffeParser::CaffeParserImpl::OperationParsingFunction>
+    ICaffeParser::CaffeParserImpl::ms_CaffeLayerNameToParsingFunctions = {
+    { "Input",        &CaffeParserImpl::ParseInputLayer },
+    { "Convolution",  &CaffeParserImpl::ParseConvLayer },
+    { "Deconvolution",&CaffeParserImpl::ParseDeconvLayer },
+    { "Pooling",      &CaffeParserImpl::ParsePoolingLayer },
+    { "ReLU",         &CaffeParserImpl::ParseReluLayer },
+    { "LRN",          &CaffeParserImpl::ParseLRNLayer },
+    { "InnerProduct", &CaffeParserImpl::ParseInnerProductLayer },
+    { "Softmax",      &CaffeParserImpl::ParseSoftmaxLayer },
+    { "Eltwise",      &CaffeParserImpl::ParseEltwiseLayer },
+    { "Concat",       &CaffeParserImpl::ParseConcatLayer },
+    { "BatchNorm",    &CaffeParserImpl::ParseBatchNormLayer },
+    { "Scale",        &CaffeParserImpl::ParseScaleLayer },
+    { "Split",        &CaffeParserImpl::ParseSplitLayer },
+    { "Dropout",      &CaffeParserImpl::ParseDropoutLayer},
+    { "ArgMax",       &CaffeParserImpl::ParseArgmaxLayer},
 };
 
-ICaffeParser* ICaffeParser::CreateRaw()
-{
-    return new RecordByRecordCaffeParser();
-}
-
-ICaffeParserPtr ICaffeParser::Create()
-{
-    return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy);
-}
-
-void ICaffeParser::Destroy(ICaffeParser* parser)
-{
-    delete parser;
-}
-
-CaffeParserBase::CaffeParserBase()
+ICaffeParser::CaffeParserImpl::CaffeParserImpl()
     : m_Network(nullptr, nullptr)
 {
 
 }
 
 CaffeParser::CaffeParser()
-: CaffeParserBase()
+: CaffeParserImpl()
 {
 
 }
 
-BindingPointInfo CaffeParserBase::GetNetworkInputBindingInfo(const std::string& name) const
+BindingPointInfo ICaffeParser::CaffeParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
 {
     return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
 }
 
-BindingPointInfo CaffeParserBase::GetNetworkOutputBindingInfo(const std::string& name) const
+BindingPointInfo ICaffeParser::CaffeParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
 {
     return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
 }
 
-std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParserBase::GetBindingInfo(const std::string& layerName,
+std::pair<armnn::LayerBindingId, armnn::TensorInfo> ICaffeParser::CaffeParserImpl::GetBindingInfo(
+    const std::string& layerName,
     const char* bindingPointDesc,
     const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
 {
@@ -304,7 +343,7 @@ std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParserBase::GetBindingI
     return it->second;
 }
 
-TensorInfo CaffeParserBase::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
+TensorInfo ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
 {
     std::vector<unsigned int> shape;
     for (int j = 0; j < blobShape.dim_size(); ++j)
@@ -329,7 +368,7 @@ BlobShape TensorDescToBlobShape(const TensorInfo& desc)
 
 // Note: can move to CaffeParser when/if we optimise the text/string format
 //       to load on a layer by layer basis
-vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& layerParam)
+vector<const LayerParameter*> ICaffeParser::CaffeParserImpl::GetInputs(const LayerParameter& layerParam)
 {
     std::vector<const caffe::LayerParameter*> ret;
     ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
@@ -352,7 +391,7 @@ vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& l
     return ret;
 }
 
-void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseInputLayer(const LayerParameter& layerParam)
 {
     ARMNN_ASSERT(layerParam.type() == "Input");
     ValidateNumInputsOutputs(layerParam, 0, 1);
@@ -402,10 +441,10 @@ void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
-                                             const armnn::Convolution2dDescriptor& desc,
-                                             unsigned int kernelW,
-                                             unsigned int kernelH)
+void ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
+                                                           const armnn::Convolution2dDescriptor& desc,
+                                                           unsigned int kernelW,
+                                                           unsigned int kernelH)
 {
     ARMNN_ASSERT(layerParam.type() == "Convolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
@@ -592,10 +631,10 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::AddDeconvLayerWithSplits(const caffe::LayerParameter& layerParam,
-                                               const armnn::TransposeConvolution2dDescriptor& desc,
-                                               unsigned int kernelW,
-                                               unsigned int kernelH)
+void ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(const caffe::LayerParameter& layerParam,
+                                                             const armnn::TransposeConvolution2dDescriptor& desc,
+                                                             unsigned int kernelW,
+                                                             unsigned int kernelH)
 {
     ARMNN_ASSERT(layerParam.type() == "Deconvolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
@@ -780,10 +819,10 @@ void CaffeParserBase::AddDeconvLayerWithSplits(const caffe::LayerParameter& laye
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
-                                                    const armnn::Convolution2dDescriptor& convDesc,
-                                                    unsigned int kernelW,
-                                                    unsigned int kernelH)
+void ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
+                                                                  const armnn::Convolution2dDescriptor& convDesc,
+                                                                  unsigned int kernelW,
+                                                                  unsigned int kernelH)
 {
     ARMNN_ASSERT(layerParam.type() == "Convolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
@@ -870,7 +909,7 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseConvLayer(const LayerParameter& layerParam)
 {
     // Ignored Caffe Parameters
     // * Weight Filler
@@ -1049,7 +1088,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseDeconvLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseDeconvLayer(const LayerParameter& layerParam)
 {
     // Ignored Caffe Parameters
     // * Weight Filler
@@ -1225,7 +1264,7 @@ void CaffeParserBase::ParseDeconvLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParsePoolingLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParsePoolingLayer(const LayerParameter& layerParam)
 {
     // Ignored Caffe Parameters
     //      Stochastic Pooling
@@ -1337,7 +1376,7 @@ void CaffeParserBase::ParsePoolingLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseArgmaxLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseArgmaxLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 1, 1);
     ArgMaxParameter param = layerParam.argmax_param();
@@ -1397,7 +1436,7 @@ void CaffeParserBase::ParseArgmaxLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), argmaxLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseReluLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseReluLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
@@ -1423,7 +1462,7 @@ void CaffeParserBase::ParseReluLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), activationLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseLRNLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
@@ -1522,7 +1561,7 @@ void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseInnerProductLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(const LayerParameter& layerParam)
 {
     InnerProductParameter param = layerParam.inner_product_param();
 
@@ -1596,7 +1635,7 @@ void CaffeParserBase::ParseInnerProductLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseSoftmaxLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseSoftmaxLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
@@ -1618,7 +1657,7 @@ void CaffeParserBase::ParseSoftmaxLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseEltwiseLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseEltwiseLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 2, 1);
 
@@ -1663,7 +1702,7 @@ void CaffeParserBase::ParseEltwiseLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseConcatLayer(const LayerParameter& layerParam)
 {
     unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
     // We assume concat happens along the channel dimension, which is 1 in (0, 1, 2, 3).
@@ -1722,7 +1761,7 @@ void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseBatchNormLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(const LayerParameter& layerParam)
 {
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
@@ -1786,7 +1825,7 @@ void CaffeParserBase::ParseBatchNormLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseScaleLayer(const LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseScaleLayer(const LayerParameter& layerParam)
 {
     // Current unoptimal solution: add a batchnormalization layer with 0 mean and 1 variance.
     ValidateNumInputsOutputs(layerParam, 1, 1);
@@ -1836,7 +1875,7 @@ void CaffeParserBase::ParseScaleLayer(const LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
 }
 
-void CaffeParserBase::ParseSplitLayer(const caffe::LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseSplitLayer(const caffe::LayerParameter& layerParam)
 {
     // Used in caffe to duplicate memory - not necessary in armnn.
     if (layerParam.bottom_size() != 1)
@@ -1855,7 +1894,7 @@ void CaffeParserBase::ParseSplitLayer(const caffe::LayerParameter& layerParam)
     }
 }
 
-void CaffeParserBase::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
+void ICaffeParser::CaffeParserImpl::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
 {
     // Ignored for inference, so patch the single input to its single output.
     if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
@@ -1871,21 +1910,21 @@ void CaffeParserBase::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
     SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
 }
 
-void CaffeParserBase::TrackInputBinding(armnn::IConnectableLayer* layer,
+void ICaffeParser::CaffeParserImpl::TrackInputBinding(armnn::IConnectableLayer* layer,
     armnn::LayerBindingId id,
     const armnn::TensorInfo& tensorInfo)
 {
     return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo);
 }
 
-void CaffeParserBase::TrackOutputBinding(armnn::IConnectableLayer* layer,
+void ICaffeParser::CaffeParserImpl::TrackOutputBinding(armnn::IConnectableLayer* layer,
     armnn::LayerBindingId id,
     const armnn::TensorInfo& tensorInfo)
 {
     return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo);
 }
 
-void CaffeParserBase::TrackBindingPoint(armnn::IConnectableLayer* layer,
+void ICaffeParser::CaffeParserImpl::TrackBindingPoint(armnn::IConnectableLayer* layer,
     armnn::LayerBindingId id,
     const armnn::TensorInfo& tensorInfo,
     const char* bindingPointDesc,
@@ -1907,7 +1946,7 @@ void CaffeParserBase::TrackBindingPoint(armnn::IConnectableLayer* layer,
     }
 }
 
-armnn::IOutputSlot& CaffeParserBase::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
+armnn::IOutputSlot& ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
 {
     auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
     if (it != m_ArmnnOutputSlotForCaffeTop.end())
@@ -1923,7 +1962,7 @@ armnn::IOutputSlot& CaffeParserBase::GetArmnnOutputSlotForCaffeTop(const std::st
     }
 }
 
-void CaffeParserBase::SetArmnnOutputSlotForCaffeTop(
+void ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(
     const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot)
 {
     auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
@@ -1942,7 +1981,7 @@ void CaffeParserBase::SetArmnnOutputSlotForCaffeTop(
 
 // Note: can move to CaffeParser when/if we optimise the text/string format
 //       to load on a layer by layer basis
-void CaffeParserBase::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
+void ICaffeParser::CaffeParserImpl::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
 {
     // Finds layers with the same top.
     std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
@@ -1998,7 +2037,7 @@ void CaffeParserBase::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
 
 // Note: can move to CaffeParser when/if we optimise the text/string format
 //       to load on a layer by layer basis
-void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
+void ICaffeParser::CaffeParserImpl::LoadNetParam(NetParameter& netParameter)
 {
     // Caffe models sometimes have an implicit input layer.
     // In that case, add an explicit one.
@@ -2094,7 +2133,7 @@ void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
     }
 }
 
-INetworkPtr CaffeParserBase::CreateNetworkFromTextFile(const char* graphFile,
+INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromTextFile(const char* graphFile,
     const std::map<std::string, armnn::TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -2126,7 +2165,7 @@ INetworkPtr CaffeParserBase::CreateNetworkFromTextFile(const char* graphFile,
     return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
 }
 
-INetworkPtr CaffeParserBase::CreateNetworkFromString(const char* protoText,
+INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromString(const char* protoText,
     const std::map<std::string, armnn::TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -2180,7 +2219,7 @@ INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
 
 // Note: can move to CaffeParser when/if we optimise the text/string format
 //       to load on a layer by layer basis
-INetworkPtr CaffeParserBase::CreateNetworkFromNetParameter(NetParameter& netParam,
+INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter(NetParameter& netParam,
     const std::map<std::string, armnn::TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -2211,7 +2250,7 @@ INetworkPtr CaffeParserBase::CreateNetworkFromNetParameter(NetParameter& netPara
     return move(m_Network);
 }
 
-void CaffeParserBase::Cleanup() {
+void ICaffeParser::CaffeParserImpl::Cleanup() {
     // cleanup, in case we reuse this parser
     m_InputShapes.clear();
     m_RequestedOutputs.clear();
diff --git a/src/armnnCaffeParser/CaffeParser.hpp b/src/armnnCaffeParser/CaffeParser.hpp
index 98eeffc6a1..f369d5f2f9 100644
--- a/src/armnnCaffeParser/CaffeParser.hpp
+++ b/src/armnnCaffeParser/CaffeParser.hpp
@@ -23,32 +23,39 @@ class NetParameter;
 
 namespace armnnCaffeParser
 {
 
-class CaffeParserBase: public ICaffeParser
+class ICaffeParser::CaffeParserImpl
 {
 public:
 
     // Because we haven't looked at reducing the memory usage when loading from Text/String
    // have to retain these functions here for the moment.
     /// Create the network from a protobuf text file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(
+    armnn::INetworkPtr CreateNetworkFromTextFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) override;
+        const std::vector<std::string>& requestedOutputs);
+
+    /// Create the network from a protobuf binary file on the disk.
+    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
+        const char* graphFile,
+        const std::map<std::string, armnn::TensorShape>& inputShapes,
+        const std::vector<std::string>& requestedOutputs) = 0;
 
     /// Creates the network directly from protobuf text in a string. Useful for debugging/testing.
-    virtual armnn::INetworkPtr CreateNetworkFromString(
+    armnn::INetworkPtr CreateNetworkFromString(
         const char* protoText,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) override;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
-    CaffeParserBase();
+    CaffeParserImpl();
+    virtual ~CaffeParserImpl() = default;
 
 protected:
     /// Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type
@@ -118,7 +125,7 @@ protected:
 
     void Cleanup();
 
-    using OperationParsingFunction = void(CaffeParserBase::*)(const caffe::LayerParameter& layerParam);
+    using OperationParsingFunction = void(CaffeParserImpl::*)(const caffe::LayerParameter& layerParam);
 
     /// Maps Caffe layer names to parsing member functions.
     static const std::map<std::string, OperationParsingFunction> ms_CaffeLayerNameToParsingFunctions;
@@ -162,7 +169,7 @@ protected:
 
 };
 
-class CaffeParser : public CaffeParserBase
+class CaffeParser : public ICaffeParser::CaffeParserImpl
 {
 public:
 
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
index a59725cbd2..b7ff3d8731 100644
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
+++ b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
@@ -456,7 +456,7 @@ void ResolveInPlaceLayers(std::vector<LayerParameterInfo>& layerInfo)
 
 } // anonymous namespace, can't be seen outside this source file
 
-RecordByRecordCaffeParser::RecordByRecordCaffeParser() : CaffeParserBase()
+RecordByRecordCaffeParser::RecordByRecordCaffeParser() : CaffeParserImpl()
 {}
 
 armnn::INetworkPtr RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
index 361d6f428d..aab2fb025b 100644
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
+++ b/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
@@ -22,7 +22,7 @@ class NetParameterInfo;
 class LayerParameterInfo;
 
 
-class RecordByRecordCaffeParser : public CaffeParserBase
+class RecordByRecordCaffeParser : public ICaffeParser::CaffeParserImpl
 {
 public:
 
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index f3d0a73342..9f5aa1975a 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -20,6 +20,51 @@ using namespace armnn;
 
 namespace armnnOnnxParser
 {
+
+IOnnxParser::IOnnxParser() : pOnnxParserImpl(new OnnxParserImpl()) {}
+
+IOnnxParser::~IOnnxParser() = default;
+
+IOnnxParser* IOnnxParser::CreateRaw()
+{
+    return new IOnnxParser();
+}
+
+IOnnxParserPtr IOnnxParser::Create()
+{
+    return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
+}
+
+void IOnnxParser::Destroy(IOnnxParser* parser)
+{
+    delete parser;
+}
+
+armnn::INetworkPtr IOnnxParser::CreateNetworkFromBinaryFile(const char* graphFile)
+{
+    return pOnnxParserImpl->CreateNetworkFromBinaryFile(graphFile);
+}
+
+armnn::INetworkPtr IOnnxParser::CreateNetworkFromTextFile(const char* graphFile)
+{
+    return pOnnxParserImpl->CreateNetworkFromTextFile(graphFile);
+}
+
+armnn::INetworkPtr IOnnxParser::CreateNetworkFromString(const std::string& protoText)
+{
+    return pOnnxParserImpl->CreateNetworkFromString(protoText);
+}
+
+BindingPointInfo IOnnxParser::GetNetworkInputBindingInfo(const std::string& name) const
+{
+    return pOnnxParserImpl->GetNetworkInputBindingInfo(name);
+}
+
+BindingPointInfo IOnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
+{
+    return pOnnxParserImpl->GetNetworkOutputBindingInfo(name);
+}
+
 namespace
 {
 void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
@@ -357,25 +402,25 @@ TensorInfo ComputeReshapeInfo(const TensorShape& targetShapeTensor,
 
 } //namespace
 
-const std::map<std::string, OnnxParser::OperationParsingFunction> OnnxParser::m_ParserFunctions = {
-    { "BatchNormalization",    &OnnxParser::ParseBatchNormalization},
-    { "GlobalAveragePool",     &OnnxParser::ParseGlobalAveragePool},
-    { "AveragePool",           &OnnxParser::ParseAveragePool },
-    { "Clip",                  &OnnxParser::ParseClip },
-    { "Constant",              &OnnxParser::ParseConstant },
-    { "MaxPool",               &OnnxParser::ParseMaxPool },
-    { "Reshape",               &OnnxParser::ParseReshape },
-    { "Sigmoid",               &OnnxParser::ParseSigmoid },
-    { "Tanh",                  &OnnxParser::ParseTanh },
-    { "Relu",                  &OnnxParser::ParseRelu },
-    { "LeakyRelu",             &OnnxParser::ParseLeakyRelu },
-    { "Conv",                  &OnnxParser::ParseConv },
-    { "Add",                   &OnnxParser::ParseAdd },
-    { "Flatten",               &OnnxParser::ParseFlatten},
+const std::map<std::string, OnnxParserImpl::OperationParsingFunction> OnnxParserImpl::m_ParserFunctions = {
+    { "BatchNormalization",    &OnnxParserImpl::ParseBatchNormalization},
+    { "GlobalAveragePool",     &OnnxParserImpl::ParseGlobalAveragePool},
+    { "AveragePool",           &OnnxParserImpl::ParseAveragePool },
+    { "Clip",                  &OnnxParserImpl::ParseClip },
+    { "Constant",              &OnnxParserImpl::ParseConstant },
+    { "MaxPool",               &OnnxParserImpl::ParseMaxPool },
+    { "Reshape",               &OnnxParserImpl::ParseReshape },
+    { "Sigmoid",               &OnnxParserImpl::ParseSigmoid },
+    { "Tanh",                  &OnnxParserImpl::ParseTanh },
+    { "Relu",                  &OnnxParserImpl::ParseRelu },
+    { "LeakyRelu",             &OnnxParserImpl::ParseLeakyRelu },
+    { "Conv",                  &OnnxParserImpl::ParseConv },
+    { "Add",                   &OnnxParserImpl::ParseAdd },
+    { "Flatten",               &OnnxParserImpl::ParseFlatten},
 };
 
 template<typename TypePair, typename Location>
-void OnnxParser::ValidateInputs(const onnx::NodeProto& node,
+void OnnxParserImpl::ValidateInputs(const onnx::NodeProto& node,
                                 TypePair validInputs,
                                 const Location& location)
 {
@@ -391,13 +436,13 @@ void OnnxParser::ValidateInputs(const onnx::NodeProto& node,
 }
 
 #define VALID_INPUTS(NODE, VALID_INPUTS) \
-    OnnxParser::ValidateInputs(NODE, \
+    OnnxParserImpl::ValidateInputs(NODE, \
                                VALID_INPUTS, \
                                CHECK_LOCATION())
 
-std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> outNames,
-                                                      const IConnectableLayer* layer,
-                                                      std::vector<TensorShape> inputShapes)
+std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::string> outNames,
+                                                          const IConnectableLayer* layer,
+                                                          std::vector<TensorShape> inputShapes)
 {
     ARMNN_ASSERT(! outNames.empty());
     bool needCompute = std::any_of(outNames.begin(),
@@ -427,33 +472,18 @@ std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> o
     return outInfo;
 }
 
-IOnnxParser* IOnnxParser::CreateRaw()
-{
-    return new OnnxParser();
-}
-
-IOnnxParserPtr IOnnxParser::Create()
-{
-    return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
-}
-
-void IOnnxParser::Destroy(IOnnxParser* parser)
-{
-    delete parser;
-}
-
-OnnxParser::OnnxParser()
+OnnxParserImpl::OnnxParserImpl()
     : m_Network(nullptr, nullptr)
 {
 }
 
-void OnnxParser::ResetParser()
+void OnnxParserImpl::ResetParser()
 {
     m_Network = armnn::INetworkPtr(nullptr, nullptr);
     m_Graph = nullptr;
 }
 
-void OnnxParser::Cleanup()
+void OnnxParserImpl::Cleanup()
 {
     m_TensorConnections.clear();
     m_TensorsInfo.clear();
@@ -461,7 +491,7 @@ void OnnxParser::Cleanup()
     m_OutputsFusedAndUsed.clear();
 }
 
-std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(const std::string name)
+std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParserImpl::CreateConstTensor(const std::string name)
 {
     const TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
     onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;
@@ -499,7 +529,7 @@ std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(c
     return std::make_pair(ConstTensor(tensorInfo, tensorData.get()), std::move(tensorData));
 }
 
-ModelPtr OnnxParser::LoadModelFromTextFile(const char* graphFile)
+ModelPtr OnnxParserImpl::LoadModelFromTextFile(const char* graphFile)
 {
     FILE* fd = fopen(graphFile, "r");
 
@@ -524,7 +554,7 @@ ModelPtr OnnxParser::LoadModelFromTextFile(const char* graphFile)
     return modelProto;
 }
 
-INetworkPtr OnnxParser::CreateNetworkFromTextFile(const char* graphFile)
+INetworkPtr OnnxParserImpl::CreateNetworkFromTextFile(const char* graphFile)
 {
     ResetParser();
     ModelPtr modelProto = LoadModelFromTextFile(graphFile);
@@ -532,7 +562,7 @@ INetworkPtr OnnxParser::CreateNetworkFromTextFile(const char* graphFile)
 }
 
-ModelPtr OnnxParser::LoadModelFromBinaryFile(const char* graphFile)
+ModelPtr OnnxParserImpl::LoadModelFromBinaryFile(const char* graphFile)
 {
     FILE* fd = fopen(graphFile, "rb");
 
@@ -560,14 +590,14 @@ ModelPtr OnnxParser::LoadModelFromBinaryFile(const char* graphFile)
 }
 
-INetworkPtr OnnxParser::CreateNetworkFromBinaryFile(const char* graphFile)
+INetworkPtr OnnxParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
 {
     ResetParser();
     ModelPtr modelProto = LoadModelFromBinaryFile(graphFile);
     return CreateNetworkFromModel(*modelProto);
 }
 
-ModelPtr OnnxParser::LoadModelFromString(const std::string& protoText)
+ModelPtr OnnxParserImpl::LoadModelFromString(const std::string& protoText)
 {
     if (protoText == "")
     {
@@ -586,14 +616,14 @@ ModelPtr OnnxParser::LoadModelFromString(const std::string& protoText)
     return modelProto;
 }
 
-INetworkPtr OnnxParser::CreateNetworkFromString(const std::string& protoText)
+INetworkPtr OnnxParserImpl::CreateNetworkFromString(const std::string& protoText)
 {
     ResetParser();
     ModelPtr modelProto = LoadModelFromString(protoText);
     return CreateNetworkFromModel(*modelProto);
 }
 
-INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
+INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model)
 {
     m_Network = INetwork::Create();
     try
@@ -610,7 +640,7 @@ INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
     return std::move(m_Network);
 }
 
-void OnnxParser::LoadGraph()
+void OnnxParserImpl::LoadGraph()
 {
     ARMNN_ASSERT(m_Graph.get() != nullptr);
 
@@ -684,7 +714,7 @@ void OnnxParser::LoadGraph()
     }
 }
 
-void OnnxParser::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto>* list)
+void OnnxParserImpl::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto>* list)
 {
     for (auto tensor : *list)
     {
@@ -695,7 +725,7 @@ void OnnxParser::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueI
     }
 }
 
-void OnnxParser::DetectFullyConnected()
+void OnnxParserImpl::DetectFullyConnected()
 {
     m_OutputsFusedAndUsed = std::vector<UsageSummary> (static_cast<size_t>(m_Graph->node_size()), UsageSummary());
 
     auto matmulAndConstant = [&](const std::string& constInput,
@@ -753,10 +783,10 @@ void OnnxParser::DetectFullyConnected()
 }
 
 template<typename Location>
-void OnnxParser::GetInputAndParam(const onnx::NodeProto& node,
-                                  std::string* inputName,
-                                  std::string* constName,
-                                  const Location& location)
+void OnnxParserImpl::GetInputAndParam(const onnx::NodeProto& node,
+                                      std::string* inputName,
+                                      std::string* constName,
+                                      const Location& location)
 {
     int cstIndex;
     if (m_TensorsInfo[node.input(0)].isConstant())
@@ -786,7 +816,7 @@ void OnnxParser::GetInputAndParam(const onnx::NodeProto& node,
 }
 
 template<typename Location>
-void OnnxParser::To1DTensor(const std::string& name, const Location& location)
+void OnnxParserImpl::To1DTensor(const std::string& name, const Location& location)
 {
     TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
    std::vector<uint32_t> newShape;
@@ -805,7 +835,7 @@ void OnnxParser::To1DTensor(const std::string& name, const Location& location)
     m_TensorsInfo[name].m_info->SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
 }
 
-void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
+void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
 {
     ARMNN_ASSERT(node.op_type() == "Conv");
 
@@ -864,7 +894,7 @@ void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, cons
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode)
+void OnnxParserImpl::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode)
 {
 
     // find matmul inputs
@@ -941,7 +971,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
     }
 }
 
-void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescriptor& desc)
+void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescriptor& desc)
 {
 
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
@@ -1021,8 +1051,8 @@ void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescripto
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-std::pair<std::string, std::string> OnnxParser::AddPrepareBroadcast(const std::string& input0,
-                                                                    const std::string& input1)
+std::pair<std::string, std::string> OnnxParserImpl::AddPrepareBroadcast(const std::string& input0,
+                                                                        const std::string& input1)
 {
     std::pair<std::string, std::string> inputs = std::make_pair(input0, input1);
 
@@ -1044,7 +1074,7 @@ std::pair<std::string, std::string> OnnxParser::AddPrepareBroadcast(const std::s
     return inputs;
 }
 
-void OnnxParser::CreateConstantLayer(const std::string& tensorName, const std::string& layerName)
+void OnnxParserImpl::CreateConstantLayer(const std::string& tensorName, const std::string& layerName)
 {
     auto armnnTensor = CreateConstTensor(tensorName);
 
@@ -1053,9 +1083,9 @@ void OnnxParser::CreateConstantLayer(const std::string& tensorName, const std::s
     RegisterOutputSlots(layer, {tensorName});
 }
 
-void OnnxParser::CreateReshapeLayer(const std::string& inputName,
-                                    const std::string& outputName,
-                                    const std::string& layerName)
+void OnnxParserImpl::CreateReshapeLayer(const std::string& inputName,
+                                        const std::string& outputName,
+                                        const std::string& layerName)
 {
     const TensorInfo outputTensorInfo = *m_TensorsInfo[outputName].m_info;
     ReshapeDescriptor reshapeDesc;
@@ -1073,7 +1103,7 @@ void OnnxParser::CreateReshapeLayer(const std::string& inputName,
     RegisterOutputSlots(layer, {outputName});
 }
 
-void OnnxParser::ParseActivation(const onnx::NodeProto& node, const armnn::ActivationFunction func)
+void OnnxParserImpl::ParseActivation(const onnx::NodeProto& node, const armnn::ActivationFunction func)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1, 3);
     CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
@@ -1103,32 +1133,32 @@ void OnnxParser::ParseActivation(const onnx::NodeProto& node, const armnn::Activ
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::ParseClip(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseClip(const onnx::NodeProto& node)
 {
     ParseActivation(node, ActivationFunction::BoundedReLu);
 }
 
-void OnnxParser::ParseSigmoid(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseSigmoid(const onnx::NodeProto& node)
 {
     ParseActivation(node, ActivationFunction::Sigmoid);
 }
 
-void OnnxParser::ParseTanh(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseTanh(const onnx::NodeProto& node)
 {
     ParseActivation(node, ActivationFunction::TanH);
 }
 
-void OnnxParser::ParseRelu(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseRelu(const onnx::NodeProto& node)
 {
     ParseActivation(node, ActivationFunction::ReLu);
 }
 
-void OnnxParser::ParseLeakyRelu(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseLeakyRelu(const onnx::NodeProto& node)
 {
     ParseActivation(node, ActivationFunction::LeakyReLu);
 }
 
-void OnnxParser::ParseAdd(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
     CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
@@ -1186,7 +1216,7 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::ParseAveragePool(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseAveragePool(const onnx::NodeProto& node)
 {
     Pooling2dDescriptor desc;
     desc.m_PoolType = PoolingAlgorithm::Average;
@@ -1199,7 +1229,7 @@ void OnnxParser::ParseAveragePool(const onnx::NodeProto& node)
     AddPoolingLayer(node, desc);
 }
 
-void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseBatchNormalization(const onnx::NodeProto& node)
 {
     //IGNORE momentum parameter and spatial parameters
 
@@ -1246,7 +1276,7 @@ void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::ParseConstant(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseConstant(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
     if (!node.attribute(0).has_t())
@@ -1269,7 +1299,7 @@ void OnnxParser::ParseConstant(const onnx::NodeProto& node)
     CreateConstantLayer(node.output(0), node.name());
 }
 
-void OnnxParser::ParseConv(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2, 3); //input, weight, (bias)
     CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
@@ -1462,7 +1492,7 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::ParseFlatten(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseFlatten(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
     CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
@@ -1509,7 +1539,7 @@ void OnnxParser::ParseFlatten(const onnx::NodeProto& node)
     CreateReshapeLayer(node.input(0), node.output(0), node.name());
 }
 
-void OnnxParser::ParseGlobalAveragePool(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseGlobalAveragePool(const onnx::NodeProto& node)
 {
     Pooling2dDescriptor desc = Pooling2dDescriptor();
     desc.m_PoolType = PoolingAlgorithm::Average;
@@ -1533,7 +1563,7 @@ void OnnxParser::ParseGlobalAveragePool(const onnx::NodeProto& node)
     RegisterOutputSlots(layer, {node.output(0)});
 }
 
-void OnnxParser::ParseMaxPool(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseMaxPool(const onnx::NodeProto& node)
 {
     Pooling2dDescriptor desc;
     desc.m_PoolType = PoolingAlgorithm::Max;
@@ -1541,7 +1571,7 @@ void OnnxParser::ParseMaxPool(const onnx::NodeProto& node)
     AddPoolingLayer(node, desc);
 }
 
-void OnnxParser::ParseReshape(const onnx::NodeProto& node)
+void OnnxParserImpl::ParseReshape(const onnx::NodeProto& node)
 {
     CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
     CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
@@ -1594,9 +1624,9 @@ void OnnxParser::ParseReshape(const onnx::NodeProto& node)
     }
 }
 
-void OnnxParser::PrependForBroadcast(const std::string& outputName,
-                                     const std::string& input0,
-                                     const std::string& input1)
+void OnnxParserImpl::PrependForBroadcast(const std::string& outputName,
+                                         const std::string& input0,
+                                         const std::string& input1)
 {
     //input0 should be reshaped to have same number of dim as input1
     TensorInfo outputTensorInfo = TensorInfo(*m_TensorsInfo[input0].m_info);
@@ -1633,7 +1663,7 @@ void OnnxParser::PrependForBroadcast(const std::string& outputName,
     }
 }
 
-void OnnxParser::SetupInputLayers()
+void OnnxParserImpl::SetupInputLayers()
 {
     //Find user input and add their layers
     for(int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
@@ -1651,7 +1681,7 @@ void OnnxParser::SetupInputLayers()
     }
 }
 
-void OnnxParser::SetupOutputLayers()
+void OnnxParserImpl::SetupOutputLayers()
 {
     if(m_Graph->output_size() == 0)
     {
@@ -1668,7 +1698,7 @@ void OnnxParser::SetupOutputLayers()
     }
 }
 
-void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
+void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
 {
     ARMNN_ASSERT(layer != nullptr);
     if (tensorIds.size() != layer->GetNumInputSlots())
@@ -1695,7 +1725,7 @@ void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<
     }
 }
 
-void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
+void OnnxParserImpl::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
 {
     ARMNN_ASSERT(layer != nullptr);
     if (tensorIds.size() != layer->GetNumOutputSlots())
@@ -1734,7 +1764,7 @@ void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector
     }
 }
 
-BindingPointInfo OnnxParser::GetNetworkInputBindingInfo(const std::string& name) const
+BindingPointInfo OnnxParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
 {
     for(int i = 0; i < m_Graph->input_size(); ++i)
     {
@@ -1748,7 +1778,7 @@ BindingPointInfo OnnxParser::GetNetworkInputBindingInfo(const std::string& name)
                                      name, CHECK_LOCATION().AsString()));
 }
 
-BindingPointInfo OnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
+BindingPointInfo OnnxParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
 {
     for(int i = 0; i < m_Graph->output_size(); ++i)
     {
@@ -1762,7 +1792,7 @@ BindingPointInfo OnnxParser::GetNetworkOutputBindingInfo(const std::string& name
                                      name, CHECK_LOCATION().AsString()));
 }
 
-std::vector<std::string> OnnxParser::GetInputs(ModelPtr& model)
+std::vector<std::string> OnnxParserImpl::GetInputs(ModelPtr& model)
 {
     if(model == nullptr) {
         throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
@@ -1786,7 +1816,7 @@ std::vector<std::string> OnnxParser::GetInputs(ModelPtr& model)
     return inputNames;
 }
 
-std::vector<std::string> OnnxParser::GetOutputs(ModelPtr& model)
+std::vector<std::string> OnnxParserImpl::GetOutputs(ModelPtr& model)
 {
     if(model == nullptr) {
         throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
diff --git a/src/armnnOnnxParser/OnnxParser.hpp b/src/armnnOnnxParser/OnnxParser.hpp
index a87863e95c..0db93248bc 100644
--- a/src/armnnOnnxParser/OnnxParser.hpp
+++ b/src/armnnOnnxParser/OnnxParser.hpp
@@ -22,33 +22,34 @@ namespace armnnOnnxParser
 
 using ModelPtr = std::unique_ptr<onnx::ModelProto>;
 
-class OnnxParser : public IOnnxParser
+class OnnxParserImpl
 {
 
-using OperationParsingFunction = void(OnnxParser::*)(const onnx::NodeProto& NodeProto);
+using OperationParsingFunction = void(OnnxParserImpl::*)(const onnx::NodeProto& NodeProto);
 
 public:
 
     using GraphPtr = std::unique_ptr<onnx::GraphProto>;
 
     /// Create the network from a protobuf binary file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) override;
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile);
 
     /// Create the network from a protobuf text file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile) override;
+    armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile);
 
     /// Create the network directly from protobuf text in a string. Useful for debugging/testing
-    virtual armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText) override;
+    armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText);
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
 public:
 
-    OnnxParser();
+    OnnxParserImpl();
+    ~OnnxParserImpl() = default;
 
     static ModelPtr LoadModelFromBinaryFile(const char * fileName);
     static ModelPtr LoadModelFromTextFile(const char * fileName);
diff --git a/src/armnnOnnxParser/test/GetInputsOutputs.cpp b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
index b22ef3a308..5bb3095cc7 100644
--- a/src/armnnOnnxParser/test/GetInputsOutputs.cpp
+++ b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
@@ -68,8 +68,8 @@ struct GetInputsOutputsMainFixture : public armnnUtils::ParserPrototxtFixture<ar
 
 BOOST_FIXTURE_TEST_CASE(GetInput, GetInputsOutputsMainFixture)
 {
-    ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
-    std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+    ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
+    std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
     BOOST_CHECK_EQUAL(1, tensors.size());
     BOOST_CHECK_EQUAL("Input", tensors[0]);
@@ -77,8 +77,8 @@ BOOST_FIXTURE_TEST_CASE(GetInput, GetInputsOutputsMainFixture)
 
 BOOST_FIXTURE_TEST_CASE(GetOutput, GetInputsOutputsMainFixture)
 {
-    ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
-    std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetOutputs(model);
+    ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
+    std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetOutputs(model);
     BOOST_CHECK_EQUAL(1, tensors.size());
     BOOST_CHECK_EQUAL("Output", tensors[0]);
 }
@@ -139,20 +139,20 @@ struct GetEmptyInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture<a
 
 BOOST_FIXTURE_TEST_CASE(GetEmptyInputsOutputs, GetEmptyInputsOutputsFixture)
 {
-    ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
-    std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+    ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
+    std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
     BOOST_CHECK_EQUAL(0, tensors.size());
 }
 
 BOOST_AUTO_TEST_CASE(GetInputsNullModel)
 {
-    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParser::LoadModelFromString(""), armnn::InvalidArgumentException);
+    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParserImpl::LoadModelFromString(""), armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
 {
     auto silencer = google::protobuf::LogSilencer(); //get rid of errors from protobuf
-    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParser::LoadModelFromString("nknnk"), armnn::ParseException);
+    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParserImpl::LoadModelFromString("nknnk"), armnn::ParseException);
 }
 
 struct GetInputsMultipleFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
@@ -243,8 +243,8 @@ struct GetInputsMultipleFixture : public armnnUtils::ParserPrototxtFixture<armnn
 
 BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsMultipleFixture)
 {
-    ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
-    std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+    ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
+    std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
     BOOST_CHECK_EQUAL(2, tensors.size());
     BOOST_CHECK_EQUAL("Input0", tensors[0]);
     BOOST_CHECK_EQUAL("Input1", tensors[1]);
-- 
cgit v1.2.1
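
Editor's note: the following is a minimal, self-contained sketch of the Pimpl (pointer-to-implementation) structure this patch applies to the parsers. It is not part of the patch or of the ArmNN API; the ExampleParser and ExampleParserImpl names are hypothetical and chosen only to illustrate the pattern of a public class with no virtual functions that forwards every call to an implementation class hidden in the .cpp.

// ExampleParser.hpp -- public header: implementation type only forward-declared,
// so its members can change without recompiling clients of this header.
#include <memory>
#include <string>

class ExampleParser
{
public:
    static ExampleParser* CreateRaw();
    static void Destroy(ExampleParser* parser);

    std::string ParseFromString(const std::string& text);

private:
    ExampleParser();
    ~ExampleParser();                               // defined in the .cpp, where Impl is complete

    class ExampleParserImpl;                        // definition lives in the .cpp only
    std::unique_ptr<ExampleParserImpl> pImpl;       // all state sits behind this pointer
};

// ExampleParser.cpp -- the implementation class plus the forwarding boilerplate.
class ExampleParser::ExampleParserImpl
{
public:
    std::string ParseFromString(const std::string& text) { return "parsed: " + text; }
};

ExampleParser::ExampleParser() : pImpl(new ExampleParserImpl()) {}
ExampleParser::~ExampleParser() = default;

ExampleParser* ExampleParser::CreateRaw() { return new ExampleParser(); }
void ExampleParser::Destroy(ExampleParser* parser) { delete parser; }

std::string ExampleParser::ParseFromString(const std::string& text)
{
    return pImpl->ParseFromString(text);            // every public method forwards to the impl
}

Usage mirrors the patched parsers: client code holds the public type (ICaffeParser/IOnnxParser above, ExampleParser here) created through CreateRaw/Create and destroyed through Destroy, while the implementation class can be reworked freely behind the stable interface.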