author    James Ward <james.ward@arm.com>  2020-09-11 17:32:44 +0100
committer James Ward <james.ward@arm.com>  2020-10-02 08:16:54 +0000
commit    58dec6bab3d8d588a37d93bafcada89947c9cd58 (patch)
tree      dc8645f6a520f0a307453eeeb9bbb70b61414f79 /src
parent    620e0732abede92f505f69d7676bfbd9b5d4584f (diff)
download  armnn-58dec6bab3d8d588a37d93bafcada89947c9cd58.tar.gz
IVGCVSW-5296 Remove boost::format armnn parsers
* replaced with fmt::format
* one case required std::stringstream instead

Signed-off-by: James Ward <james.ward@arm.com>
Change-Id: Ica9a7eb4e7bed04aa03172058dd9e3d10efc8548
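For context, the migration pattern applied throughout the diff below is sketched here; the helper names and the example message are illustrative, not code from this commit. boost::format takes positional %N% placeholders with arguments chained via operator%, while fmt::format takes {} placeholders filled from an ordinary argument list; std::stringstream remains useful where part of a message has to be assembled incrementally.

// Illustrative before/after sketch of the migration; names are hypothetical.
#include <boost/format.hpp>
#include <fmt/format.h>

#include <sstream>
#include <string>
#include <vector>

// Before: positional %N% placeholders, arguments chained with operator%.
std::string MessageOld(unsigned int blobIndex, const std::string& layerName)
{
    return boost::str(
        boost::format("Expected data blob at index %1% in layer %2%") %
        blobIndex %
        layerName);
}

// After: {} placeholders filled from an ordinary argument list.
std::string MessageNew(unsigned int blobIndex, const std::string& layerName)
{
    return fmt::format("Expected data blob at index {} in layer {}", blobIndex, layerName);
}

// The std::stringstream case: a fragment built incrementally (here, a shape
// printed element by element) and then embedded via a single {} placeholder.
std::string ShapeMessage(const std::vector<int>& dims)
{
    std::stringstream ss;
    ss << "[ ";
    for (int d : dims)
    {
        ss << d << " ";
    }
    ss << "]";
    return fmt::format("shape is {}", ss.str());
}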
Diffstat (limited to 'src')
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp                      439
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp                        374
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp                    452
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp    23
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp                            789
5 files changed, 895 insertions(+), 1182 deletions(-)
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index d50846abab..0a6a6c5348 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -17,7 +17,7 @@
#include <armnn/utility/NumericCast.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
// Caffe
#include "caffe/proto/caffe.pb.h"
@@ -70,13 +70,11 @@ const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int
if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
{
throw ParseException(
- boost::str(
- boost::format(
- "Expected data blob at index %1% in layer %2% not found. nBlobs=%2%. %4%") %
- blobIndex %
- layerParam.name() %
- nBlobs %
- CHECK_LOCATION().AsString()));
+ fmt::format("Expected data blob at index {} in layer {} not found. nBlobs={}. {}",
+ blobIndex,
+ layerParam.name(),
+ nBlobs,
+ CHECK_LOCATION().AsString()));
}
const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
@@ -91,12 +89,10 @@ void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, u
if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
{
throw ParseException(
- boost::str(
- boost::format(
- "Expected data blob at index %1% in layer %2% not found. %3%") %
- blobIndex %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Expected data blob at index {} in layer {} not found. {}",
+ blobIndex,
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
@@ -105,15 +101,13 @@ void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, u
if (blobSize != outData.size())
{
throw ParseException(
- boost::str(
- boost::format(
- "Data blob at index %1% in layer %2% has an unexpected size. "
- "Expected %3% elements but got %4% elements. %5%") %
- blobIndex %
- layerParam.name() %
- outData.size() %
- blobSize %
- CHECK_LOCATION().AsString()));
+ fmt::format("Data blob at index {} in layer {} has an unexpected size. "
+ "Expected {} elements but got {} elements. {}",
+ blobIndex,
+ layerParam.name(),
+ outData.size(),
+ blobSize,
+ CHECK_LOCATION().AsString()));
}
int outSizeInt = armnn::numeric_cast<int>(outData.size());
@@ -137,26 +131,24 @@ void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
if (numInputs != armnn::numeric_cast<unsigned int>(numInputsActual))
{
throw ParseException(
- boost::str(
- boost::format("Invalid number of inputs requested %1% for layer %2% "
- "while only %3% present. %4%") %
- numInputs %
- layerParameter.name() %
- numInputsActual %
- CHECK_LOCATION().AsString()));
+ fmt::format("Invalid number of inputs requested {} for layer {} "
+ "while only {} present. {}",
+ numInputs,
+ layerParameter.name(),
+ numInputsActual,
+ CHECK_LOCATION().AsString()));
}
int numOutputsActual = layerParameter.top_size();
if (numOutputs != armnn::numeric_cast<unsigned int>(numOutputsActual))
{
throw ParseException(
- boost::str(
- boost::format("Invalid number of outputs requested %1% for layer %2% "
- "while only %3% present. %4%") %
- numOutputs %
- layerParameter.name() %
- numOutputsActual %
- CHECK_LOCATION().AsString()));
+ fmt::format("Invalid number of outputs requested {} for layer {} "
+ "while only {} present. {}",
+ numOutputs,
+ layerParameter.name(),
+ numOutputsActual,
+ CHECK_LOCATION().AsString()));
}
}
@@ -303,12 +295,10 @@ std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParserBase::GetBindingI
if (it == nameToBindingInfo.end())
{
throw InvalidArgumentException(
- boost::str(
- boost::format(
- "Unknown binding %1% for layer '%2%'. %3%") %
- bindingPointDesc %
- layerName %
- CHECK_LOCATION().AsString()));
+ fmt::format("Unknown binding {} for layer '{}'. {}",
+ bindingPointDesc,
+ layerName,
+ CHECK_LOCATION().AsString()));
}
return it->second;
}
@@ -349,13 +339,11 @@ vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& l
if (inputIt == m_CaffeLayersByTopName.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Can't find Caffe layer with top called '%1%', "
- "which is listed as an input of '%2%'. %3%") %
- inputName %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Can't find Caffe layer with top called '{}', "
+ "which is listed as an input of '{}'. {}",
+ inputName,
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
ret.push_back(inputIt->second);
}
@@ -395,22 +383,18 @@ void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
|| originalShape->dim(3) != overrideShape[3]))
{
throw ParseException(
- boost::str(
- boost::format(
- "Parsed input shape for '%1%' is incompatible with the override provided. %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Parsed input shape for '{}' is incompatible with the override provided. {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
inputTensorInfo.SetShape(overrideShape);
}
else if (!originalShape)
{
throw ParseException(
- boost::str(
- boost::format(
- "No input descriptor given for '%1%' and no input shape found in caffe model. %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("No input descriptor given for '{}' and no input shape found in caffe model. {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
TrackInputBinding(inputLayer, inputId, inputTensorInfo);
@@ -592,14 +576,12 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
if (!concatLayer)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to create final concat layer for Split+Convolution+Concat. "
- "Layer=%1% #groups=%2% #filters=%3% %4%") %
- layerParam.name() %
- numGroups %
- numFilters %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to create final concat layer for Split+Convolution+Concat. "
+ "Layer={} #groups={} #filters={} {}",
+ layerParam.name(),
+ numGroups,
+ numFilters,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int g = 0; g < numGroups; ++g)
@@ -686,13 +668,11 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
if (!returnLayer)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to create depthwise convolution layer. "
- "Layer=%1% #filters=%2% %3%") %
- layerParam.name() %
- numFilters %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to create depthwise convolution layer. "
+ "Layer={} #filters={} {}",
+ layerParam.name(),
+ numFilters,
+ CHECK_LOCATION().AsString()));
}
armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
inputConnection.Connect(returnLayer->GetInputSlot(0));
@@ -750,27 +730,23 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
if (numGroups > numFilters)
{
throw ParseException(
- boost::str(
- boost::format(
- "Error parsing Convolution: %1%. "
- "The 'group'=%2% parameter cannot be larger than the "
- "number of filters supplied ='%3%'. %4%") %
- layerParam.name() %
- numGroups %
- numFilters %
- CHECK_LOCATION().AsString()));
+ fmt::format("Error parsing Convolution: {}. "
+ "The 'group'={} parameter cannot be larger than the "
+ "number of filters supplied ='{}'. {}",
+ layerParam.name(),
+ numGroups,
+ numFilters,
+ CHECK_LOCATION().AsString()));
}
if (inputShape.dim_size() != 4)
{
throw ParseException(
- boost::str(
- boost::format(
- "Convolution input shape is expected to have 4 dimensions. "
- "%1%'s input has only %2%. %3%") %
- layerParam.name() %
- inputShape.dim_size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Convolution input shape is expected to have 4 dimensions. "
+ "{}'s input has only {}. {}",
+ layerParam.name(),
+ inputShape.dim_size(),
+ CHECK_LOCATION().AsString()));
}
if (numGroups > 1)
@@ -778,15 +754,13 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
if (numGroups > inputShape.dim(1))
{
throw ParseException(
- boost::str(
- boost::format(
- "Error parsing Convolution: %1%. "
- "The 'group'=%2% parameter cannot be larger than the "
- "channel of the input shape=%3% (in NCHW format). %4%") %
- layerParam.name() %
- numGroups %
- inputShape.dim(1) %
- CHECK_LOCATION().AsString()));
+ fmt::format("Error parsing Convolution: {}. "
+ "The 'group'={} parameter cannot be larger than the "
+ "channel of the input shape={} (in NCHW format). {}",
+ layerParam.name(),
+ numGroups,
+ inputShape.dim(1),
+ CHECK_LOCATION().AsString()));
}
else if (numGroups == inputShape.dim(1))
{
@@ -869,14 +843,12 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
if (!returnLayer)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to create Convolution layer. "
- "Layer=%1% #groups=%2% #filters=%3% %4%") %
- layerParam.name() %
- numGroups %
- numFilters %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to create Convolution layer. "
+ "Layer={} #groups={} #filters={} {}",
+ layerParam.name(),
+ numGroups,
+ numFilters,
+ CHECK_LOCATION().AsString()));
}
SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
@@ -941,32 +913,26 @@ void CaffeParserBase::ParsePoolingLayer(const LayerParameter& layerParam)
case PoolingParameter_PoolMethod_STOCHASTIC:
{
throw ParseException(
- boost::str(
- boost::format(
- "Pooling Layer: Stochastic Pooling Not Supported. Layer=%1% %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Pooling Layer: Stochastic Pooling Not Supported. Layer={} {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
default:
{
throw ParseException(
- boost::str(
- boost::format(
- "Pooling Layer: unknown pooling method: %1% for layer: %2% %3%") %
- p %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Pooling Layer: unknown pooling method: {} for layer: {} {}",
+ p,
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
}
}
else
{
throw ParseException(
- boost::str(
- boost::format(
- "No Pooling Method Defined for %1% %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("No Pooling Method Defined for {} {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
pooling2dDescriptor.m_PadLeft = pad_w;
@@ -1058,12 +1024,10 @@ void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
default:
{
throw ParseException(
- boost::str(
- boost::format(
- "Unknown region %1% for LRN layer %2% %3%") %
- n %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Unknown region {} for LRN layer {} {}",
+ n,
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
}
}
@@ -1081,11 +1045,9 @@ void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
else
{
throw ParseException(
- boost::str(
- boost::format(
- "local_size not defined for LRN layer %1% %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("local_size not defined for LRN layer {} {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
if (param.has_alpha())
@@ -1096,11 +1058,9 @@ void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Alpha not defined for LRN layer %1% %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Alpha not defined for LRN layer {} {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
if (param.has_beta())
{
@@ -1109,11 +1069,9 @@ void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Beta not defined for LRN layer %1% %2%") %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Beta not defined for LRN layer {} {}",
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
if (param.has_k())
@@ -1261,12 +1219,10 @@ void CaffeParserBase::ParseEltwiseLayer(const LayerParameter& layerParam)
default:
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported operation %1% in Eltwise layer %2% %3%") %
- operation %
- layerParam.name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported operation {} in Eltwise layer {} {}",
+ operation,
+ layerParam.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -1296,14 +1252,12 @@ void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
if (inputInfo.GetNumDimensions()!=4)
{
throw ParseException(
- boost::str(
- boost::format(
- "The number of dimensions for input tensors of "
- "the concatenation op should be 4. Inputs of %1% has "
- "%2% dimensions. %3%") %
- layerParam.name() %
- inputInfo.GetNumDimensions() %
- CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions for input tensors of "
+ "the concatenation op should be 4. Inputs of {} has "
+ "{} dimensions. {}",
+ layerParam.name(),
+ inputInfo.GetNumDimensions(),
+ CHECK_LOCATION().AsString()));
}
mergeDimSizes[0] = inputInfo.GetShape()[0];
@@ -1353,13 +1307,11 @@ void CaffeParserBase::ParseBatchNormLayer(const LayerParameter& layerParam)
if (!param.use_global_stats())
{
throw ParseException(
- boost::str(
- boost::format(
- "Error parsing Batch Norm layer '%1%': "
- "Parameter 'use_global_stats' is set to false, which is "
- "unsupported (value used for training). %2%") %
- name %
- CHECK_LOCATION().AsString()));
+ fmt::format("Error parsing Batch Norm layer '{}': "
+ "Parameter 'use_global_stats' is set to false, which is "
+ "unsupported (value used for training). {}",
+ name,
+ CHECK_LOCATION().AsString()));
}
}
@@ -1417,13 +1369,11 @@ void CaffeParserBase::ParseScaleLayer(const LayerParameter& layerParam)
{
// Would have to use something other than BatchNormalizationLayer in this case
throw ParseException(
- boost::str(
- boost::format(
- "Loading Scale Layer: Only axis 1 is supported currently. "
- "Layer=%1% Axis=%2% %3%") %
- layerParam.name() %
- param.axis() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Loading Scale Layer: Only axis 1 is supported currently. "
+ "Layer={} Axis={} {}",
+ layerParam.name(),
+ param.axis(),
+ CHECK_LOCATION().AsString()));
}
unsigned int channels = inputInfo.GetShape()[1];
@@ -1461,13 +1411,11 @@ void CaffeParserBase::ParseSplitLayer(const caffe::LayerParameter& layerParam)
if (layerParam.bottom_size() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Split layer '%1%' should have exactly 1 bottom. "
- "#bottoms=%2% %3%") %
- layerParam.name() %
- layerParam.bottom_size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Split layer '{}' should have exactly 1 bottom. "
+ "#bottoms={} {}",
+ layerParam.name(),
+ layerParam.bottom_size(),
+ CHECK_LOCATION().AsString()));
}
armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
for (int i = 0; i < layerParam.top_size(); i++)
@@ -1482,14 +1430,12 @@ void CaffeParserBase::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Dropout layer '%1%' should have exactly 1 bottom and 1 top. "
- "#bottoms=%2% #tops=%3% %4%") %
- layerParam.name() %
- layerParam.bottom_size() %
- layerParam.top_size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Dropout layer '{}' should have exactly 1 bottom and 1 top. "
+ "#bottoms={} #tops={} {}",
+ layerParam.name(),
+ layerParam.bottom_size(),
+ layerParam.top_size(),
+ CHECK_LOCATION().AsString()));
}
SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
}
@@ -1523,12 +1469,10 @@ void CaffeParserBase::TrackBindingPoint(armnn::IConnectableLayer* layer,
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Id %1% used by more than one %2% layer %3%") %
- id %
- bindingPointDesc %
- CHECK_LOCATION().AsString()));
+ fmt::format("Id {} used by more than one {} layer {}",
+ id,
+ bindingPointDesc,
+ CHECK_LOCATION().AsString()));
}
}
@@ -1542,11 +1486,9 @@ armnn::IOutputSlot& CaffeParserBase::GetArmnnOutputSlotForCaffeTop(const std::st
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Could not find armnn output slot for Caffe top '%1%' %2%") %
- caffeTopName %
- CHECK_LOCATION().AsString()));
+ fmt::format("Could not find armnn output slot for Caffe top '{}' {}",
+ caffeTopName,
+ CHECK_LOCATION().AsString()));
}
}
@@ -1561,11 +1503,9 @@ void CaffeParserBase::SetArmnnOutputSlotForCaffeTop(
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Attempting to add duplicate entry for Caffe top '%1%' %2%") %
- caffeTopName %
- CHECK_LOCATION().AsString()));
+ fmt::format("Attempting to add duplicate entry for Caffe top '{}' {}",
+ caffeTopName,
+ CHECK_LOCATION().AsString()));
}
}
@@ -1601,28 +1541,24 @@ void CaffeParserBase::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
if (layer1.top_size() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Node '%1%' is an in-place layer but doesn't have exactly one "
- "top. It has %2% instead. %3%") %
- layer1.name() %
- layer1.top_size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Node '{}' is an in-place layer but doesn't have exactly one "
+ "top. It has {} instead. {}",
+ layer1.name(),
+ layer1.top_size(),
+ CHECK_LOCATION().AsString()));
}
std::string newTop = layer1.name() + "_top";
layer1.set_top(0, newTop);
if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
{
throw ParseException(
- boost::str(
- boost::format(
- "Node '%1%' is an in-place layer but "
- "doesn't have exactly one bottom, or it doesn't match its top. "
- "#bottoms=%2%, first bottom is %3%, top is %4% %5%") %
- layer2.name() %
- layer2.bottom(0) %
- top %
- CHECK_LOCATION().AsString()));
+ fmt::format("Node '{}' is an in-place layer but "
+ "doesn't have exactly one bottom, or it doesn't match its top. "
+ "#bottoms={}, first bottom is {}, top is {} {}",
+ layer2.name(),
+ layer2.bottom(0),
+ top,
+ CHECK_LOCATION().AsString()));
}
layer2.set_bottom(0, newTop);
}
@@ -1674,11 +1610,9 @@ void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
if (nodeIt == m_CaffeLayersByTopName.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Couldn't find requested output layer '%1%' in graph %2%") %
- requestedOutputName %
- CHECK_LOCATION().AsString()));
+ fmt::format("Couldn't find requested output layer '{}' in graph {}",
+ requestedOutputName,
+ CHECK_LOCATION().AsString()));
}
targetLayers.push_back(nodeIt->second);
}
@@ -1694,11 +1628,9 @@ void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
sortedNodes))
{
throw ParseException(
- boost::str(
- boost::format(
- "Cycle detected in graph. #nodes: %1% %2%") %
- sortedNodes.size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Cycle detected in graph. #nodes: {} {}",
+ sortedNodes.size(),
+ CHECK_LOCATION().AsString()));
}
// Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
@@ -1708,11 +1640,10 @@ void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
if (it == ms_CaffeLayerNameToParsingFunctions.end())
{
throw ParseException(
- boost::str(
- boost::format("Unsupported layer type: '%1%' for layer %2% %3%") %
- current->type() %
- current->name() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported layer type: '{}' for layer {} {}",
+ current->type(),
+ current->name(),
+ CHECK_LOCATION().AsString()));
}
auto func = it->second;
(this->*func)(*current);
@@ -1741,11 +1672,9 @@ INetworkPtr CaffeParserBase::CreateNetworkFromTextFile(const char* graphFile,
if (fd == nullptr)
{
throw FileNotFoundException(
- boost::str(
- boost::format(
- "Failed to open graph file: %1% %2%") %
- graphFile %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to open graph file: {} {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
// Parses the file into a message.
@@ -1758,11 +1687,9 @@ INetworkPtr CaffeParserBase::CreateNetworkFromTextFile(const char* graphFile,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse graph file: %1% %2%") %
- graphFile %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse graph file: {} {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
@@ -1779,10 +1706,8 @@ INetworkPtr CaffeParserBase::CreateNetworkFromString(const char* protoText,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse graph string %1%") %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse graph string {}",
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
@@ -1797,11 +1722,9 @@ INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (fd == nullptr)
{
throw FileNotFoundException(
- boost::str(
- boost::format(
- "Failed to open graph file at: %1% %2%") %
- graphFile %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to open graph file at: {} {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
// Parses the file into a message.
@@ -1816,11 +1739,9 @@ INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse protobuf file: %1% %2%") %
- graphFile %
- CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse protobuf file: {} {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 01ad12448f..4ae6627ac2 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -9,7 +9,7 @@
#include <armnn/utility/NumericCast.hpp>
#include <VerificationHelpers.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
@@ -35,13 +35,12 @@ void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> valid
if (!isValid)
{
throw ParseException(
- boost::str(
- boost::format("Datatype %1% is not valid for tensor '%2%' of node '%3%', not in {%4%}. %5%") %
- onnx::TensorProto::DataType_Name(actualValue) %
- tensorName %
- nodeName %
- validExpr %
- location.AsString()));
+ fmt::format("Datatype {} is not valid for tensor '{}' of node '{}', not in {{{}}}. {}",
+ onnx::TensorProto::DataType_Name(actualValue),
+ tensorName,
+ nodeName,
+ validExpr,
+ location.AsString()));
}
}
@@ -69,14 +68,13 @@ void ReadMandatoryNodeAttributeImpl(const onnx::NodeProto& node,
}
else
{
- throw ParseException(boost::str(boost::format(
- "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, "
- "but found %4% instead %5%")
- % attribName
- % node.name()
- % onnx::AttributeProto::AttributeType_Name(expectedType)
- % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Attribute {} of node {} expected to have {} as "
+ "onnx::AttributeProto::AttributeType, but found {} instead {}",
+ attribName,
+ node.name(),
+ onnx::AttributeProto::AttributeType_Name(expectedType),
+ onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
+ CHECK_LOCATION().AsString()));
}
break;
}
@@ -84,8 +82,8 @@ void ReadMandatoryNodeAttributeImpl(const onnx::NodeProto& node,
}
if (attriNum == node.attribute_size())
{
- throw ParseException(boost::str(boost::format("Could not find required attribute %1% in node %2% %3%")
- % attribName % node.name() % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Could not find required attribute {} in node {} {}",
+ attribName, node.name(), CHECK_LOCATION().AsString()));
}
}
@@ -106,14 +104,14 @@ void ReadOptionalNodeAttributeImpl(const onnx::NodeProto& node,
}
else
{
- throw ParseException(boost::str(boost::format(
- "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, "
- "but found %4% instead %5%")
- % attribName
- % node.name()
- % onnx::AttributeProto::AttributeType_Name(expectedType)
- % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Attribute {} of node {} expected to have {} as onnx::AttributeProto::AttributeType, "
+ "but found {} instead {}",
+ attribName,
+ node.name(),
+ onnx::AttributeProto::AttributeType_Name(expectedType),
+ onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
+ CHECK_LOCATION().AsString()));
}
}
}
@@ -219,13 +217,11 @@ armnn::TensorInfo ToTensorInfo(const std::string& name, std::vector<unsigned int
default:
{
throw ParseException(
- boost::str(
- boost::format("'%1%' is not a currently supported datatype for tensor %2%."
- " Supported dataTypes are FLOAT, INT32 and INT64. %3%") %
- onnx::TensorProto::DataType_Name(
- static_cast<onnx::TensorProto::DataType>(data_type)) %
- name %
- CHECK_LOCATION().AsString() ));
+ fmt::format("'{}' is not a currently supported datatype for tensor {}."
+ " Supported dataTypes are FLOAT, INT32 and INT64. {}",
+ onnx::TensorProto::DataType_Name(static_cast<onnx::TensorProto::DataType>(data_type)),
+ name,
+ CHECK_LOCATION().AsString() ));
}
}
@@ -342,12 +338,12 @@ TensorInfo ComputeReshapeInfo(const TensorShape& targetShapeTensor,
}
ss << targetDims[targetDims.size() - 1] << " ]";
- throw ParseException(boost::str(
- boost::format("Error during creation of reshaped tensor '%1%'. At most one component of shape can be "
- " -1 and here, shape is %2% %3%")
- % outName
- % ss.str()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Error during creation of reshaped tensor '{}'. At most one component of shape can be "
+ " -1 and here, shape is {} {}",
+ outName,
+ ss.str(),
+ CHECK_LOCATION().AsString()));
}
auto targetNumElements = armnn::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
@@ -478,13 +474,13 @@ std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(c
{
if(tensorInfo.GetNumElements() != static_cast<uint>(onnxTensor.float_data_size()))
{
- throw ParseException(boost::str(
- boost::format("The number of data provided (%1%) does not match the tensor '%2%' number of elements"
- " (%3%) %4%")
- % onnxTensor.float_data_size()
- % name
- % tensorInfo.GetNumElements()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("The number of data provided ({}) does not match the tensor '{}' number of "
+ "elements ({}) {}",
+ onnxTensor.float_data_size(),
+ name,
+ tensorInfo.GetNumElements(),
+ CHECK_LOCATION().AsString()));
}
::memcpy(tensorData.get(), srcData, tensorSizeInBytes);
}
@@ -496,10 +492,9 @@ std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(c
// Const tensors requires at least a list of values
if (tensorInfo.GetNumElements() == 0)
{
- throw ParseException(boost::str(
- boost::format("No tensor data found for Const tensor '%1%' %2%")
- % name
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("No tensor data found for Const tensor '{}' {}",
+ name,
+ CHECK_LOCATION().AsString()));
}
return std::make_pair(ConstTensor(tensorInfo, tensorData.get()), std::move(tensorData));
}
@@ -510,8 +505,7 @@ ModelPtr OnnxParser::LoadModelFromTextFile(const char* graphFile)
if (fd == nullptr)
{
- throw FileNotFoundException(boost::str(
- boost::format("Invalid (null) filename %1%") % CHECK_LOCATION().AsString()));
+ throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
}
// Parse the file into a message
@@ -525,8 +519,7 @@ ModelPtr OnnxParser::LoadModelFromTextFile(const char* graphFile)
{
std::stringstream error;
error << "Failed to parse graph file";
- throw ParseException(boost::str(
- boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
}
return modelProto;
}
@@ -545,8 +538,7 @@ ModelPtr OnnxParser::LoadModelFromBinaryFile(const char* graphFile)
if (fd == nullptr)
{
- throw FileNotFoundException(boost::str(
- boost::format("Invalid (null) filename %1%") % CHECK_LOCATION().AsString()));
+ throw FileNotFoundException(fmt::format("Invalid (null) filename {}", CHECK_LOCATION().AsString()));
}
// Parse the file into a message
@@ -562,8 +554,7 @@ ModelPtr OnnxParser::LoadModelFromBinaryFile(const char* graphFile)
{
std::stringstream error;
error << "Failed to parse graph file";
- throw ParseException(boost::str(
- boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
}
return modelProto;
@@ -580,8 +571,8 @@ ModelPtr OnnxParser::LoadModelFromString(const std::string& protoText)
{
if (protoText == "")
{
- throw InvalidArgumentException(boost::str(
- boost::format("Invalid (empty) string for model parameter %1%") % CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("Invalid (empty) string for model parameter {}",
+ CHECK_LOCATION().AsString()));
}
// Parse the string into a message
ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
@@ -590,8 +581,7 @@ ModelPtr OnnxParser::LoadModelFromString(const std::string& protoText)
{
std::stringstream error;
error << "Failed to parse graph file";
- throw ParseException(boost::str(
- boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("{} {}", error.str(), CHECK_LOCATION().AsString()));
}
return modelProto;
}
@@ -673,11 +663,10 @@ void OnnxParser::LoadGraph()
}
else
{
- throw ParseException(boost::str(
- boost::format("Unsupported operation %1% for node '%2%' %3%")
- % operation
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Unsupported operation {} for node '{}' {}",
+ operation,
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
}
}
@@ -780,12 +769,11 @@ void OnnxParser::GetInputAndParam(const onnx::NodeProto& node,
}
else
{
- throw ParseException(boost::str(
- boost::format("One of the input tensors ('%1%' or '%2%') should be constant in node '%3%' %4%")
- % node.input(0)
- % node.input(1)
- % node.name()
- % location.AsString()));
+ throw ParseException(fmt::format("One of the input tensors ('{}' or '{}') should be constant in node '{}' {}",
+ node.input(0),
+ node.input(1),
+ node.name(),
+ location.AsString()));
}
if(constName)
{
@@ -806,10 +794,10 @@ void OnnxParser::To1DTensor(const std::string& name, const Location& location)
{
if(shape[i] != 1)
{
- throw ParseException(boost::str(
- boost::format("Only tensors with shape [1, ..., 1, X] can be converted to 1D and %1% %2%")
- % TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype)
- % location.AsString()));
+ throw ParseException(
+ fmt::format("Only tensors with shape [1, ..., 1, X] can be converted to 1D and {} {}",
+ TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype),
+ location.AsString()));
}
}
newShape.push_back(shape[shape.GetNumDimensions() - 1]);
@@ -841,11 +829,10 @@ void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, cons
{
if(!m_TensorsInfo[node.input(2)].isConstant())
{
- throw ParseException(boost::str(
- boost::format("Bias '%1%' should be constant in Conv layer '%2%' %3%")
- % node.input(2)
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
+ node.input(2),
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
desc.m_BiasEnabled = true;
auto biasTensor = CreateConstTensor(node.input(2));
@@ -910,17 +897,16 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
if (weightInfo.GetShape()[1] != biasInfo.GetShape()[0])
{
- throw ParseException(boost::str(
- boost::format("Shape of weights '%1%' and bias of following Add node '%2%' do not match : %3%"
- " and %4% ( /!\\ bias should be a 1D tensor) %5%")
- % weightName
- % addNode->name()
- % TensorInfoAsString(*m_TensorsInfo[weightName].m_info,
- weightName,
- m_TensorsInfo[weightName].m_dtype)
- % TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
- m_TensorsInfo[biasName].m_dtype )
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Shape of weights '{}' and bias of following Add node '{}' do not match : {}"
+ " and {} ( /!\\ bias should be a 1D tensor) {}",
+ weightName,
+ addNode->name(),
+ TensorInfoAsString(*m_TensorsInfo[weightName].m_info, weightName,
+ m_TensorsInfo[weightName].m_dtype),
+ TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
+ m_TensorsInfo[biasName].m_dtype ),
+ CHECK_LOCATION().AsString()));
}
layer = m_Network->AddFullyConnectedLayer(desc,
CreateConstTensor(weightName).first,
@@ -1000,12 +986,11 @@ void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescripto
}
else
{
- throw ParseException(boost::str(
- boost::format("Invalid auto_pad attribute for node %1%. "
- "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
- % node.name()
- % paddingString
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Invalid auto_pad attribute for node {}. "
+ "Only SAME_UPPER, SAME_LOWER or VALID supported and found {} {}",
+ node.name(),
+ paddingString,
+ CHECK_LOCATION().AsString()));
}
auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
uint32_t inputHeight = inputInfo.GetShape()[2];
@@ -1046,13 +1031,13 @@ std::pair<std::string, std::string> OnnxParser::AddPrepareBroadcast(const std::s
if(input1Shape.GetNumDimensions() < input0Shape.GetNumDimensions())
{
- auto outputName = boost::str(boost::format("reshape_output_%1%") % input1);
+ auto outputName = fmt::format("reshape_output_{}", input1);
PrependForBroadcast(outputName, input1, input0);
inputs.second = outputName;
}
else if(input0Shape.GetNumDimensions() < input1Shape.GetNumDimensions())
{
- auto outputName = boost::str(boost::format("reshape_output_%1%") % input0);
+ auto outputName = fmt::format("reshape_output_{}", input0);
PrependForBroadcast(outputName, input0, input1);
inputs.first = outputName;
}
@@ -1166,16 +1151,16 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
unsigned int dim1 = input1.GetShape()[i];
if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
{
- throw ParseException(boost::str(
- boost::format("Broadcast is only supported for scalar or 1D tensors in Add node '%1%'. "
- "Input dimensions should either match or one should be of size 1 and here, "
- "%2% and %3% %4%")
- % node.name()
- % TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
- m_TensorsInfo[inputs.first].m_dtype)
- % TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
- m_TensorsInfo[inputs.second].m_dtype)
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Broadcast is only supported for scalar or 1D tensors in Add node '{}'. "
+ "Input dimensions should either match or one should be of size 1 and here, "
+ "{} and {} {}",
+ node.name(),
+ TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
+ m_TensorsInfo[inputs.first].m_dtype),
+ TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
+ m_TensorsInfo[inputs.second].m_dtype),
+ CHECK_LOCATION().AsString()));
}
}
@@ -1190,10 +1175,10 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
// register the input connection -> for constant inputs, we need to make a newDim constant layer
if(m_TensorsInfo[inputs.first].isConstant()) {
- CreateConstantLayer(inputs.first, boost::str(boost::format("Add:constant_of_%1%") % node.input(0)));
+ CreateConstantLayer(inputs.first, fmt::format("Add:constant_of_{}", node.input(0)));
}
if(m_TensorsInfo[inputs.second].isConstant()) {
- CreateConstantLayer(inputs.second, boost::str(boost::format("Add:constant_of_%1%") % node.input(1)));
+ CreateConstantLayer(inputs.second, fmt::format("Add:constant_of_{}", node.input(1)));
}
RegisterInputSlots(layer, {inputs.first, inputs.second});
@@ -1227,11 +1212,11 @@ void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
auto tensor = node.input(ind);
if(! m_TensorsInfo[tensor].isConstant())
{
- throw ParseException(boost::str(
- boost::format("Input tensor '%1%' should be constant in BatchNormalization node '%2%' %3%")
- % tensor
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Input tensor '{}' should be constant in BatchNormalization node '{}' {}",
+ tensor,
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -1266,10 +1251,9 @@ void OnnxParser::ParseConstant(const onnx::NodeProto& node)
CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
if (!node.attribute(0).has_t())
{
- throw ParseException(boost::str(
- boost::format("Value not found for Constant node '%1%' %2%")
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Value not found for Constant node '{}' {}",
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
const onnx::TensorProto& onnxTensor = node.attribute(0).t();
@@ -1294,21 +1278,21 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
{
- throw ParseException(boost::str(
- boost::format("ArmNN only supports 2D convolution and Conv layer '%1%' input %2% %3%")
- % node.name()
- % TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
- m_TensorsInfo[node.input(0)].m_dtype)
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("ArmNN only supports 2D convolution and Conv layer '{}' input {} {}",
+ node.name(),
+ TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
+ m_TensorsInfo[node.input(0)].m_dtype),
+ CHECK_LOCATION().AsString()));
}
if(!m_TensorsInfo[node.input(1)].isConstant())
{
- throw ParseException(boost::str(
- boost::format("Weights '%1%' should be constant in Conv layer '%2%' %3%")
- % node.input(1)
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Weights '{}' should be constant in Conv layer '{}' {}",
+ node.input(1),
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
@@ -1324,12 +1308,10 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
if (dilation != 1u)
{
ss << "... ]";
- throw ParseException(boost::str(
- boost::format("ArmNN only supports Convolution layers with dilations [1,1], and node '%1%' "
- "has dilatation %2% %3%")
- % node.name()
- % ss.str()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("ArmNN only supports Convolution layers with dilations [1,1], and node '{}' "
+ "has dilatation {} {}",
+ node.name(), ss.str(), CHECK_LOCATION().AsString()));
}
}
}
@@ -1368,12 +1350,12 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
}
else
{
- throw ParseException(boost::str(
- boost::format("Invalid auto_pad attribute for node %1%. "
- "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
- % node.name()
- % paddingString
- % CHECK_LOCATION().AsString()));
+ throw ParseException(
+ fmt::format("Invalid auto_pad attribute for node {}. Only SAME_UPPER, SAME_LOWER or VALID "
+ "supported and found {} {}",
+ node.name(),
+ paddingString,
+ CHECK_LOCATION().AsString()));
}
uint32_t inputHeight = inputInfo.GetShape()[2];
uint32_t inputWidth = inputInfo.GetShape()[3];
@@ -1410,15 +1392,13 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
if (group > inputInfo.GetShape()[1])
{
throw ParseException(
- boost::str(
- boost::format(
- "Error parsing Convolution node: %1%. "
- "The 'group'=%2% parameter cannot be larger than the "
- "channel of the input shape=%3% (in NCHW format). %4%") %
- node.name() %
- group %
- inputInfo.GetShape()[1] %
- CHECK_LOCATION().AsString()));
+ fmt::format("Error parsing Convolution node: {}. "
+ "The 'group'={} parameter cannot be larger than the "
+ "channel of the input shape={} (in NCHW format). {}",
+ node.name(),
+ group,
+ inputInfo.GetShape()[1],
+ CHECK_LOCATION().AsString()));
}
else if (group == inputInfo.GetShape()[1])
{
@@ -1431,14 +1411,13 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
{
// TODO: split the input by channels into channels/groups separate convolutions
// and concatenate the results afterwards
- throw ParseException(boost::str(
- boost::format("Error parsing Convolution node: %1%. "
- "The 'group'=%2% parameter should be 1 or be equal to the "
- "channel of the input shape=%3% (in NCHW format). %4%") %
- node.name() %
- group %
- inputInfo.GetShape()[1] %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Error parsing Convolution node: {}. "
+ "The 'group'={} parameter should be 1 or be equal to the "
+ "channel of the input shape={} (in NCHW format). {}",
+ node.name(),
+ group,
+ inputInfo.GetShape()[1],
+ CHECK_LOCATION().AsString()));
}
}
@@ -1449,11 +1428,10 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
{
if(!m_TensorsInfo[node.input(2)].isConstant())
{
- throw ParseException(boost::str(
- boost::format("Bias '%1%' should be constant in Conv layer '%2%' %3%")
- % node.input(2)
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Bias '{}' should be constant in Conv layer '{}' {}",
+ node.input(2),
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
desc.m_BiasEnabled = true;
auto biasTensor = CreateConstTensor(node.input(2));
@@ -1505,9 +1483,8 @@ void OnnxParser::ParseFlatten(const onnx::NodeProto& node)
/// Check Axis is within dimensions
if (axis < 0 || axis >= inputShape.GetNumDimensions())
{
- throw ParseException( boost::str(
- boost::format("Axis '%1%' invalid. Tensor has '%2%' dimensions in FlattenLayer '%3%'")
- % axis % inputShape.GetNumDimensions() % node.name()));
+ throw ParseException(fmt::format("Axis '{}' invalid. Tensor has '{}' dimensions in FlattenLayer '{}'",
+ axis, inputShape.GetNumDimensions(), node.name()));
}
/// If axis chosen is 0 dimension1 will always be 1 in output , default dimension2 to 1 because 0 is invalid
@@ -1578,11 +1555,10 @@ void OnnxParser::ParseReshape(const onnx::NodeProto& node)
if(!m_TensorsInfo[node.input(1)].isConstant())
{
- throw ParseException(boost::str(
- boost::format("Shape '%1%' should be constant in Reshape layer '%2%' %3%")
- % node.input(1)
- % node.name()
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Shape '{}' should be constant in Reshape layer '{}' {}",
+ node.input(1),
+ node.name(),
+ CHECK_LOCATION().AsString()));
}
if(m_TensorsInfo[node.input(0)].isConstant())
@@ -1648,7 +1624,7 @@ void OnnxParser::PrependForBroadcast(const std::string& outputName,
//add reshape layer if the parent was not constant...
if( ! m_TensorsInfo[input0].isConstant())
{
- CreateReshapeLayer(input0, outputName, boost::str(boost::format("Add:reshapeOf%1%") % input0));
+ CreateReshapeLayer(input0, outputName, fmt::format("Add:reshapeOf{}", input0));
}
else //make it constant and it will be create in Add
{
@@ -1679,8 +1655,7 @@ void OnnxParser::SetupOutputLayers()
{
if(m_Graph->output_size() == 0)
{
- throw ParseException(boost::str(boost::format("The given model does not have any outputs %1%")
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The given model does not have any outputs {}", CHECK_LOCATION().AsString()));
}
for(int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
@@ -1699,10 +1674,10 @@ void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<
if (tensorIds.size() != layer->GetNumInputSlots())
{
throw ParseException(
- boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%) %3%") %
- tensorIds.size() %
- layer->GetNumInputSlots() %
- CHECK_LOCATION().AsString()));
+ fmt::format("The number of tensor inputs ({}) does not match the number expected ({}) {}",
+ tensorIds.size(),
+ layer->GetNumInputSlots(),
+ CHECK_LOCATION().AsString()));
}
for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
{
@@ -1726,10 +1701,10 @@ void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector
if (tensorIds.size() != layer->GetNumOutputSlots())
{
throw ParseException(
- boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%) %3% ")
- % tensorIds.size()
- % layer->GetNumOutputSlots()
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of tensor outputs ({}) does not match the number expected ({}) {} ",
+ tensorIds.size(),
+ layer->GetNumOutputSlots(),
+ CHECK_LOCATION().AsString()));
}
for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
@@ -1750,11 +1725,10 @@ void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector
// assuming there is only one producer for that tensor
if (tensorSlots.outputSlot != nullptr)
{
- throw ParseException(boost::str(
- boost::format("Another layer has already registered itself as the producer of "
- "tensor:%1% %2%")
- % tensorId
- % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
+ "tensor:{} {}",
+ tensorId,
+ CHECK_LOCATION().AsString()));
}
tensorSlots.outputSlot = slot;
}
@@ -1770,8 +1744,8 @@ BindingPointInfo OnnxParser::GetNetworkInputBindingInfo(const std::string& name)
return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(input));
}
}
- throw InvalidArgumentException(boost::str(boost::format("The input layer '%1%' does not exist %2%")
- % name % CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("The input layer '{}' does not exist {}",
+ name, CHECK_LOCATION().AsString()));
}
BindingPointInfo OnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
@@ -1784,16 +1758,15 @@ BindingPointInfo OnnxParser::GetNetworkOutputBindingInfo(const std::string& name
return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(output));
}
}
- throw InvalidArgumentException(boost::str(boost::format("The output layer '%1%' does not exist %2%")
- % name % CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("The output layer '{}' does not exist {}",
+ name, CHECK_LOCATION().AsString()));
}
std::vector<std::string> OnnxParser::GetInputs(ModelPtr& model)
{
if(model == nullptr) {
- throw InvalidArgumentException(boost::str(
- boost::format("The given model cannot be null %1%")
- % CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
+ CHECK_LOCATION().AsString()));
}
std::vector<std::string> inputNames;
@@ -1816,9 +1789,8 @@ std::vector<std::string> OnnxParser::GetInputs(ModelPtr& model)
std::vector<std::string> OnnxParser::GetOutputs(ModelPtr& model)
{
if(model == nullptr) {
- throw InvalidArgumentException(boost::str(
- boost::format("The given model cannot be null %1%")
- % CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("The given model cannot be null {}",
+ CHECK_LOCATION().AsString()));
}
std::vector<std::string> outputNames;
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 0aad048970..d1d45f5583 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -27,7 +27,7 @@
#include <flatbuffers/flexbuffers.h>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <fstream>
#include <algorithm>
@@ -58,23 +58,21 @@ void CheckSubgraph(const TfLiteParser::ModelPtr & model,
if (model.get() == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with invalid (null) model. "
- "Possible reason is that the model is not yet loaded and Unpack(ed). "
- "subgraph:%2% at %3%") %
- location.m_Function %
- subgraphIndex %
- location.FileLine()));
+ fmt::format("{} was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "subgraph:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ location.FileLine()));
}
else if (subgraphIndex >= model->subgraphs.size())
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid subgraph index. "
- "subgraph:%2% at %3%") %
- location.m_Function %
- subgraphIndex %
- location.FileLine()));
+ fmt::format("{} was called with an invalid subgraph index. "
+ "subgraph:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ location.FileLine()));
}
}
@@ -89,37 +87,34 @@ void CheckModel(const TfLiteParser::ModelPtr & model,
if (model.get() == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with invalid (null) model. "
- "Possible reason is that the model is not yet loaded and Unpack(ed). "
- "subgraph:%2% operator:%3% at %4%") %
- location.m_Function %
- subgraphIndex %
- operatorIndex %
- location.FileLine()));
+ fmt::format("{} was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "subgraph:{} operator:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ operatorIndex,
+ location.FileLine()));
}
else if (subgraphIndex >= model->subgraphs.size())
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid subgraph index. "
- "subgraph:%2% operator:%3% at %4%") %
- location.m_Function %
- subgraphIndex %
- operatorIndex %
- location.FileLine()));
+ fmt::format("{} was called with an invalid subgraph index. "
+ "subgraph:{} operator:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ operatorIndex,
+ location.FileLine()));
}
else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
operatorIndex != VIRTUAL_OPERATOR_ID)
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid operator index. "
- "subgraph:%2% operator:%3% at %4%") %
- location.m_Function %
- subgraphIndex %
- operatorIndex %
- location.FileLine()));
+ fmt::format("{} was called with an invalid operator index. "
+ "subgraph:{} operator:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ operatorIndex,
+ location.FileLine()));
}
}
@@ -143,13 +138,12 @@ void CheckTensor(const TfLiteParser::ModelPtr & model,
if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid tensor index. "
- "subgraph:%2% tensor:%3% at %4%") %
- location.m_Function %
- subgraphIndex %
- tensorIndex %
- location.FileLine()));
+ fmt::format("{} was called with an invalid tensor index. "
+ "subgraph:{} tensor:{} at {}",
+ location.m_Function,
+ subgraphIndex,
+ tensorIndex,
+ location.FileLine()));
}
}
@@ -162,12 +156,7 @@ void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
if (rawPtr == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with a null tensor pointer. "
- "at %2%") %
- location.m_Function %
- location.FileLine()));
-
+ fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
}
}
@@ -181,31 +170,28 @@ void CheckBuffer(const TfLiteParser::ModelPtr & model,
if (model.get() == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with invalid (null) model. "
- "Possible reason is that the model is not yet loaded and Unpack(ed). "
- "buffer:%2% at %3%") %
- location.m_Function %
- bufferIndex %
- location.FileLine()));
+ fmt::format("{} was called with invalid (null) model. "
+ "Possible reason is that the model is not yet loaded and Unpack(ed). "
+ "buffer:{} at {}",
+ location.m_Function,
+ bufferIndex,
+ location.FileLine()));
}
else if (bufferIndex >= model->buffers.size())
{
throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid buffer index. "
- "buffer index:%2% at %3%") %
- location.m_Function %
- bufferIndex %
- location.FileLine()));
+ fmt::format("{} was called with an invalid buffer index. "
+ "buffer index:{} at {}",
+ location.m_Function,
+ bufferIndex,
+ location.FileLine()));
}
else if (model->buffers[bufferIndex].get() == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("The buffer #%1% is null. %3%") %
- bufferIndex %
- location.AsString()));
+ fmt::format("The buffer #{} is null. {}",
+ bufferIndex,
+ location.AsString()));
}
}
@@ -220,10 +206,9 @@ void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
if (bufferPtr == nullptr)
{
throw ParseException(
- boost::str(
- boost::format("BufferPtr is null for buffer:%1%. %2%") %
- bufferId %
- location.AsString()));
+ fmt::format("BufferPtr is null for buffer:{}. {}",
+ bufferId,
+ location.AsString()));
}
else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
tensorInfo.GetNumBytes() > bufferPtr->data.size())
@@ -263,16 +248,15 @@ bool IsActivationSupported(tflite::ActivationFunctionType activationType)
if (IsActivationSupported(OPTION->fused_activation_function) == false) \
{ \
throw ParseException( \
- boost::str( \
- boost::format("TfLite parser doesn't suppport fused activation: " \
- "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
- OPTION->fused_activation_function % \
- tflite::EnumNameActivationFunctionType(\
- OPTION->fused_activation_function) % \
- __func__ % \
- SUBGRAPH_INDEX % \
- OPERATOR_INDEX % \
- CHECK_LOCATION().FileLine())); \
+ fmt::format("TfLite parser doesn't suppport fused activation: " \
+ "{}/{} in {} subgraph:{} operator:{} at {}", \
+ OPTION->fused_activation_function, \
+ tflite::EnumNameActivationFunctionType(\
+ OPTION->fused_activation_function), \
+ __func__, \
+ SUBGRAPH_INDEX, \
+ OPERATOR_INDEX, \
+ CHECK_LOCATION().FileLine())); \
} \
} while(false)
@@ -352,12 +336,11 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
{
CheckLocation location = CHECK_LOCATION();
throw ParseException(
- boost::str(
- boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
- tensorPtr->type %
- tflite::EnumNameTensorType(tensorPtr->type) %
- tensorPtr->name %
- location.AsString()));
+ fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
+ tensorPtr->type,
+ tflite::EnumNameTensorType(tensorPtr->type),
+ tensorPtr->name,
+ location.AsString()));
}
}
std::vector<unsigned int> safeShape = shapes;
@@ -470,8 +453,7 @@ CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
IgnoreUnused(tensorPtr);
ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
ARMNN_ASSERT_MSG(bufferPtr != nullptr,
- boost::str(
- boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
+ fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
@@ -647,10 +629,9 @@ INetworkPtr TfLiteParser::CreateNetworkFromModel()
if (m_Model->subgraphs.size() != 1)
{
throw ParseException(
- boost::str(
- boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
- m_Model->subgraphs.size() %
- CHECK_LOCATION().AsString()));
+ fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
+ m_Model->subgraphs.size(),
+ CHECK_LOCATION().AsString()));
}
size_t subgraphIndex = 0;
@@ -667,10 +648,10 @@ INetworkPtr TfLiteParser::CreateNetworkFromModel()
if (builtinCode > tflite::BuiltinOperator_MAX)
{
- throw ParseException(boost::str(boost::format("Operator code %1% is out of range 0-%2%. "
- "subgraph:%3% operator idx:%4%. %5%") %
- builtinCode % tflite::BuiltinOperator_MAX % subgraphIndex %
- operatorIndex % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
+ "subgraph:{} operator idx:{}. {}",
+ builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
+ operatorIndex, CHECK_LOCATION().AsString()));
}
// lookup and call the parser function
@@ -732,12 +713,11 @@ void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
// assuming there is only one producer for that tensor
if (tensorSlots.outputSlot != nullptr)
{
- throw ParseException(boost::str(
- boost::format("Another layer has already registered itself as the producer of "
- "subgraph:%1% tensor:%2% %3%") %
- subgraphIndex %
- tensorIndex %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
+ "subgraph:{} tensor:{} {}",
+ subgraphIndex,
+ tensorIndex,
+ CHECK_LOCATION().AsString()));
}
tensorSlots.outputSlot = slot;
@@ -790,16 +770,15 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
{
// Do not add StandInLayer, throw ParseException instead
throw ParseException(
- boost::str(
- boost::format("Operator not supported. "
- "subgraph:%1% operator:%2% "
- "opcode_index:%3% opcode:%4% / %5% %6%") %
- subgraphIndex %
- operatorIndex %
- opcodeIndex %
- opcode %
- tflite::EnumNameBuiltinOperator(opcode) %
- CHECK_LOCATION().AsString()));
+ fmt::format("Operator not supported. "
+ "subgraph:{} operator:{} "
+ "opcode_index:{} opcode:{} / {} {}",
+ subgraphIndex,
+ operatorIndex,
+ opcodeIndex,
+ opcode,
+ tflite::EnumNameBuiltinOperator(opcode),
+ CHECK_LOCATION().AsString()));
}
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
@@ -809,7 +788,7 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
StandInDescriptor descriptor(numInputs, numOutputs);
- auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
+ auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
// Add a non-executable StandInLayer as a placeholder for any unsupported operator
IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
@@ -872,7 +851,7 @@ void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* layer = nullptr;
- auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
if (inputs.size() == 3)
{
@@ -960,7 +939,7 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
armnn::IConnectableLayer* layer = nullptr;
- auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
if (inputs.size() == 3)
{
@@ -1007,7 +986,7 @@ void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1032,7 +1011,7 @@ void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Exp:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);
ElementwiseUnaryDescriptor desc;
desc.m_Operation = UnaryOperation::Exp;
@@ -1059,7 +1038,7 @@ void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
TransposeDescriptor desc;
if (inputs.size() == 2)
@@ -1161,7 +1140,7 @@ void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* layer = nullptr;
- auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
layer = m_Network->AddTransposeConvolution2dLayer(desc,
filterTensorAndData.first,
@@ -1220,7 +1199,7 @@ void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorInde
desc.m_Crops = crops;
desc.m_DataLayout = armnn::DataLayout::NHWC;
- auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
@@ -1249,7 +1228,7 @@ void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorInd
L2NormalizationDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;
- auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1279,7 +1258,7 @@ void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
@@ -1309,7 +1288,7 @@ void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
@@ -1346,11 +1325,11 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
{
case PoolingAlgorithm::Average:
layerName =
- boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
break;
case PoolingAlgorithm::Max:
layerName =
- boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
+ fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
break;
default:
ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
@@ -1427,7 +1406,7 @@ void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
desc = SliceDescriptor(begin, size);
- auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
@@ -1460,7 +1439,7 @@ void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
@@ -1510,7 +1489,7 @@ void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorInde
desc.m_PadList = padList;
desc.m_DataLayout = armnn::DataLayout::NHWC;
- auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
@@ -1591,7 +1570,7 @@ void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
- auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo outputTensorInfo =
@@ -1656,7 +1635,7 @@ void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
desc.m_End = end;
desc.m_Stride = stride;
- auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1686,7 +1665,7 @@ void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
- auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1718,7 +1697,7 @@ void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
- auto layerName = boost::str(boost::format("Div:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1749,7 +1728,7 @@ void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
- auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1780,7 +1759,7 @@ void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
- auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1819,7 +1798,7 @@ void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
true : false;
- auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1842,7 +1821,7 @@ void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Neg:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1879,7 +1858,7 @@ void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
}
- auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
@@ -1903,7 +1882,7 @@ void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -1960,7 +1939,7 @@ void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, A
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = str(boost::format("Activation:"));
+ auto layerName = fmt::format("Activation:");
ActivationDescriptor activationDesc;
activationDesc.m_Function = activationType;
@@ -1968,43 +1947,43 @@ void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, A
{
case ActivationFunction::ReLu:
{
- layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
break;
}
case ActivationFunction::BoundedReLu:
{
- layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
activationDesc.m_A = 6.0f;
activationDesc.m_B = 0.0f;
break;
}
case ActivationFunction::Sigmoid:
{
- layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
break;
}
case ActivationFunction::TanH:
{
- layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
activationDesc.m_A = 1.0f;
activationDesc.m_B = 1.0f;
break;
}
case ActivationFunction::LeakyReLu:
{
- layerName += str(boost::format("LEAKYRELU:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
activationDesc.m_A = options->alpha;
break;
}
case ActivationFunction::HardSwish:
- layerName += str(boost::format("HARDSWISH:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
break;
default:
{
throw ParseException(
- boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
- " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
+ fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
+ static_cast<int>(activationType), CHECK_LOCATION().AsString()));
}
}
@@ -2033,8 +2012,7 @@ armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & i
if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
{
throw ParseException(
- boost::str(
- boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
+ fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
}
auto targetNumElements =
@@ -2064,7 +2042,7 @@ void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
- auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
@@ -2188,13 +2166,13 @@ void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, Resiz
desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
desc.m_DataLayout = armnn::DataLayout::NHWC;
- auto layerName = str(boost::format("Resize:"));
+ auto layerName = fmt::format("Resize:");
switch (resizeMethod)
{
case ResizeMethod::Bilinear:
{
- layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
@@ -2204,14 +2182,14 @@ void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, Resiz
}
case ResizeMethod::NearestNeighbor:
{
- layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
+ layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
break;
}
default:
{
throw ParseException(
- boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
- " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
+ fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
+ static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
}
}
@@ -2263,7 +2241,7 @@ void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex
inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
}
- auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
@@ -2304,19 +2282,17 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
if (weightsDimension != 2)
{
throw ParseException(
- boost::str(
- boost::format(
- "Dimension %1% for Fully Connected weights is not supported by Armnn. "
- "Node %2%")
- % weightsDimension
- % CHECK_LOCATION().AsString()));
+ fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
+ "Node {}",
+ weightsDimension,
+ CHECK_LOCATION().AsString()));
}
auto filterTensorAndData = CreateConstTensor(inputs[1],
filterTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* layer = nullptr;
- auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
if (inputs.size() == 3)
{
@@ -2356,17 +2332,15 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to deduce input tensor shape from filter size %1%")
- % reshapedDimensions[1]
- % CHECK_LOCATION().AsString()));
+ fmt::format("Failed to deduce input tensor shape from filter size {} {}",
+ reshapedDimensions[1],
+ CHECK_LOCATION().AsString()));
}
armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
- std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
+ std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
armnn::ReshapeDescriptor desc;
desc.m_TargetShape = reshapedTensorInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
@@ -2440,7 +2414,7 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
- auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
layerName.c_str());
@@ -2498,7 +2472,7 @@ void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
desc.m_InputShape = inputTensorInfo.GetShape();
- auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -2531,13 +2505,11 @@ void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
if (unpackAxis >= inputTensorInfo.GetNumDimensions())
{
throw ParseException(
- boost::str(
- boost::format(
- "The unpack axis: %1% cannot be greater than or equal to "
- "the number of input dimension %2% %3%")
- % unpackAxis
- % inputTensorInfo.GetNumDimensions()
- % CHECK_LOCATION().AsString()));
+ fmt::format("The unpack axis: {} cannot be greater than or equal to "
+ "the number of input dimension {} {}",
+ unpackAxis,
+ inputTensorInfo.GetNumDimensions(),
+ CHECK_LOCATION().AsString()));
}
unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
@@ -2584,7 +2556,7 @@ void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
}
- auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -2598,7 +2570,7 @@ void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
{
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
- std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
+ std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
armnn::ReshapeDescriptor desc;
desc.m_TargetShape = outputTensorInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
@@ -2651,13 +2623,10 @@ void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
if (inputDimSize > MaxNumOfTensorDimensions)
{
throw ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "split op cannot be greater than %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % MaxNumOfTensorDimensions
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ MaxNumOfTensorDimensions,
+ CHECK_LOCATION().AsString()));
}
std::vector<unsigned int> splitterDimSizes(inputDimSize);
@@ -2685,7 +2654,7 @@ void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
}
- auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -2736,13 +2705,11 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
if (inputDimSize > MaxNumOfTensorDimensions)
{
throw ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "SplitV op cannot be greater than %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % MaxNumOfTensorDimensions
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions: {} for input tensors of the "
+ "SplitV op cannot be greater than {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ MaxNumOfTensorDimensions,
+ CHECK_LOCATION().AsString()));
}
// Get split axis
@@ -2833,7 +2800,7 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
accumSplit += splitSize;
}
- auto layerName = boost::str(boost::format("SplitV:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
@@ -2862,7 +2829,7 @@ void TfLiteParser::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
- auto layerName = boost::str(boost::format("ArgMax:%1%:%2%") % subgraphIndex % operatorIndex);
+ auto layerName = fmt::format("ArgMax:{}:{}", subgraphIndex, operatorIndex);
armnn::TensorInfo sizeTensorInfo0 = ToTensorInfo(inputs[0]);
armnn::TensorInfo sizeTensorInfo1 = ToTensorInfo(inputs[1]);
@@ -2934,12 +2901,11 @@ armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnecta
default:
{
throw ParseException(
- boost::str(
- boost::format("TfLite parser doesn't suppport fused activation: "
- "%1%/%2% %3% ") %
- activationType %
- tflite::EnumNameActivationFunctionType(activationType) %
- CHECK_LOCATION().AsString()));
+ fmt::format("TfLite parser doesn't suppport fused activation: "
+ "{}/{} {} ",
+ activationType,
+ tflite::EnumNameActivationFunctionType(activationType),
+ CHECK_LOCATION().AsString()));
}
}
@@ -2957,19 +2923,19 @@ TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
{
if (fileName == nullptr)
{
- throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
+ throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
CHECK_LOCATION().AsString()));
}
std::error_code errorCode;
fs::path pathToFile(fileName);
if (!fs::exists(pathToFile, errorCode))
{
- std::string locationString = CHECK_LOCATION().AsString();
- std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
- fileName %
- errorCode %
- locationString);
- throw FileNotFoundException(msg);
+        // fmt::format() cannot be used here: std::error_code has no fmt formatter
+ std::stringstream msg;
+ msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
+ << " " << CHECK_LOCATION().AsString();
+
+ throw FileNotFoundException(msg.str());
}
std::ifstream file(fileName, std::ios::binary);
std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
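
One call site above keeps std::stringstream rather than fmt. A minimal
standalone sketch of why (illustrative function, not ArmNN code): fmt::format()
needs a fmt::formatter specialization for every argument, and std::error_code
only provides an ostream operator<<, so a {} placeholder for it fails to
compile unless <fmt/ostream.h> or a custom formatter is supplied.

    #include <sstream>
    #include <string>
    #include <system_error>

    // Fallback mirroring the hunk above; fmt::format("... {}", errorCode)
    // would not compile without a formatter for std::error_code.
    std::string DescribeMissingFile(const char* fileName, std::error_code errorCode)
    {
        std::stringstream msg;
        msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode;
        return msg.str();
    }
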
@@ -2981,17 +2947,17 @@ TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryC
{
if (binaryContent == nullptr)
{
- throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
+ throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
CHECK_LOCATION().AsString()));
}
flatbuffers::Verifier verifier(binaryContent, len);
if (verifier.VerifyBuffer<tflite::Model>() == false)
{
throw ParseException(
- boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
- "flatbuffers format. size:%1% %2%") %
- len %
- CHECK_LOCATION().AsString()));
+ fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
+ "flatbuffers format. size:{} {}",
+ len,
+ CHECK_LOCATION().AsString()));
}
return tflite::UnPackModel(binaryContent);
}
@@ -3098,13 +3064,13 @@ void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
if (tensorIndexes.size() != layer->GetNumInputSlots())
{
throw ParseException(
- boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
- " for subgraph:%3% operator index:%4% %5%") %
- tensorIndexes.size() %
- layer->GetNumInputSlots() %
- subgraphIndex %
- operatorIndex %
- CHECK_LOCATION().AsString()));
+ fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
+ " for subgraph:{} operator index:{} {}",
+ tensorIndexes.size(),
+ layer->GetNumInputSlots(),
+ subgraphIndex,
+ operatorIndex,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
@@ -3125,13 +3091,13 @@ void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
if (tensorIndexes.size() != layer->GetNumOutputSlots())
{
throw ParseException(
- boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
- " for subgraph:%3% operator index:%4% %5%") %
- tensorIndexes.size() %
- layer->GetNumOutputSlots() %
- subgraphIndex %
- operatorIndex %
- CHECK_LOCATION().AsString()));
+ fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
+ " for subgraph:{} operator index:{} {}",
+ tensorIndexes.size(),
+ layer->GetNumOutputSlots(),
+ subgraphIndex,
+ operatorIndex,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
@@ -3199,7 +3165,7 @@ void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
tensorInfo,
armnn::Optional<armnn::PermutationVector&>());
- std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
+ std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
IConnectableLayer *layer =
m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
@@ -3305,13 +3271,12 @@ BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
}
throw ParseException(
- boost::str(
- boost::format("No input binding found for subgraph:%1% and name:%2%. "
- "Possible inputs are: [%3%] %4%") %
- subgraphId %
- name %
- bindings.str() %
- CHECK_LOCATION().AsString()));
+ fmt::format("No input binding found for subgraph:{} and name:{}. "
+ "Possible inputs are: [{}] {}",
+ subgraphId,
+ name,
+ bindings.str(),
+ CHECK_LOCATION().AsString()));
}
BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
@@ -3338,13 +3303,12 @@ BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
}
throw ParseException(
- boost::str(
- boost::format("No output binding found for subgraph:%1% and name:%2%. "
- "Possible outputs are: [%3%] %4%") %
- subgraphId %
- name %
- bindings.str() %
- CHECK_LOCATION().AsString()));
+ fmt::format("No output binding found for subgraph:{} and name:{}. "
+ "Possible outputs are: [{}] {}",
+ subgraphId,
+ name,
+ bindings.str(),
+ CHECK_LOCATION().AsString()));
}
size_t TfLiteParser::GetSubgraphCount() const
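
The substitution in the hunks above is mechanical: each numbered boost
placeholder becomes an ordered fmt one. A standalone sketch of the equivalence,
reusing a layer-name format from this file (illustrative program, not ArmNN
code):

    #include <boost/format.hpp>
    #include <fmt/format.h>
    #include <cassert>
    #include <cstddef>
    #include <string>

    int main()
    {
        const std::size_t subgraphIndex = 0;
        const std::size_t operatorIndex = 7;

        // boost::format: numbered %N% placeholders filled via chained operator%.
        const std::string oldStyle =
            boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

        // fmt::format: {} placeholders filled from the variadic argument list.
        const std::string newStyle = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

        assert(oldStyle == newStyle && newStyle == "Conv2D:0:7");
        return 0;
    }
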
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index f2f723b5d5..50a312fcf6 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -19,7 +19,7 @@
#include <test/TensorHelpers.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
@@ -81,13 +81,12 @@ struct ParserFlatbuffersFixture
if (ret != armnn::Status::Success)
{
throw armnn::Exception(
- boost::str(
- boost::format("The runtime failed to load the network. "
- "Error was: %1%. in %2% [%3%:%4%]") %
- errorMessage %
- __func__ %
- __FILE__ %
- __LINE__));
+ fmt::format("The runtime failed to load the network. "
+ "Error was: {}. in {} [{}:{}]",
+ errorMessage,
+ __func__,
+ __FILE__,
+ __LINE__));
}
}
@@ -275,10 +274,10 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
// Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
- boost::str(boost::format("Number of dimensions expected %1%, but got %2% for output layer %3%")
- % NumOutputDimensions
- % outputNumDimensions
- % it.first));
+ fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
+ NumOutputDimensions,
+ outputNumDimensions,
+ it.first));
armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(outputTensorInfo));
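
Several TfParser messages below use explicit argument indices ({0}, {1}, ...).
Unlike boost::format's %1%, which counts from 1, fmt's manual indices count
from 0 and every index must be in range. A minimal sketch with illustrative
values (not ArmNN code):

    #include <fmt/format.h>
    #include <cassert>
    #include <string>

    int main()
    {
        // A manual index may be repeated, as the CheckPaddingTensor message
        // below repeats {0} for the rank.
        const std::string s = fmt::format("[{0}, 2] not [{0}, {1}]", 3, 5);
        assert(s == "[3, 2] not [3, 5]");

        // Referencing an index past the last argument ({2} here) would make
        // fmt::format throw fmt::format_error at runtime, or fail fmt's
        // compile-time check in newer fmt versions.
        return 0;
    }
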
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 6cf1df1b07..255233bab3 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -23,7 +23,7 @@
#include <tensorflow/core/framework/graph.pb.h>
-#include <boost/format.hpp>
+#include <fmt/core.h>
#include <fmt/format.h>
#include <numeric>
@@ -56,26 +56,22 @@ void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
- "but found %4% instead %5%")
- % attribName
- % nodeDef.name()
- % static_cast<int>(expectedValueCase)
- % static_cast<int>(attrValue.value_case())
- % CHECK_LOCATION().AsString()));
+ fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
+ "but found {} instead {}",
+ attribName,
+ nodeDef.name(),
+ static_cast<int>(expectedValueCase),
+ static_cast<int>(attrValue.value_case()),
+ CHECK_LOCATION().AsString()));
}
}
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Could not find required attribute %1% in node %2% %3%")
- % attribName
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Could not find required attribute {} in node {} {}",
+ attribName,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -96,15 +92,13 @@ void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
- "but found %4% instead %5%")
- % attribName
- % nodeDef.name()
- % static_cast<int>(expectedValueCase)
- % static_cast<int>(attrValue.value_case())
- % CHECK_LOCATION().AsString()));
+ fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
+ "but found {} instead {}",
+ attribName,
+ nodeDef.name(),
+ static_cast<int>(expectedValueCase),
+ static_cast<int>(attrValue.value_case()),
+ CHECK_LOCATION().AsString()));
}
}
}
@@ -243,10 +237,8 @@ TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& t
if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "At most one component of shape can be -1 %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("At most one component of shape can be -1 {}",
+ CHECK_LOCATION().AsString()));
}
auto targetNumElements =
@@ -299,11 +291,9 @@ OutputId ParseOutputId(const std::string & name)
if (n<0 || n>100)
{
throw ParseException(
- boost::str(
- boost::format(
- "Output tensor id is out of range for %1% %2%")
- % name
- % CHECK_LOCATION().AsString()));
+ fmt::format("Output tensor id is out of range for {} {}",
+ name,
+ CHECK_LOCATION().AsString()));
}
outputNum = static_cast<unsigned int>(n);
}
@@ -314,26 +304,22 @@ OutputId ParseOutputId(const std::string & name)
if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
{ \
throw ParseException( \
- boost::str( \
- boost::format( \
- "Unsupported data format %1% passed for %2% node %3%. " \
- "Only NHWC and NCHW supported %4%") \
- % FORMAT \
- % NODE_TYPE \
- % NODE_DEF.name() \
- % CHECK_LOCATION().AsString())); \
+ fmt::format("Unsupported data format {} passed for {} node {}. " \
+ "Only NHWC and NCHW supported {}", \
+ FORMAT, \
+ NODE_TYPE, \
+ NODE_DEF.name(), \
+ CHECK_LOCATION().AsString())); \
}
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
if(PADDING != "SAME" && PADDING != "VALID" ) \
{ \
throw ParseException( \
- boost::str( \
- boost::format( \
- "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
- % PADDING \
- % NODE_DEF.name() \
- % CHECK_LOCATION().AsString())); \
+ fmt::format("Only 'SAME' and 'VALID' padding supported. Got {} for {} {}", \
+ PADDING, \
+ NODE_DEF.name(), \
+ CHECK_LOCATION().AsString())); \
} \
} // namespace
@@ -473,13 +459,11 @@ public:
if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
{
throw ParseException(
- boost::str(
- boost::format(
- "The requested output slot #%1% "
- "for %2% does not exist %3%")
- % armnnOutputSlotIdx
- % m_Layer->GetName()
- % CHECK_LOCATION().AsString()));
+ fmt::format("The requested output slot #{} "
+ "for {} does not exist {}",
+ armnnOutputSlotIdx,
+ m_Layer->GetName(),
+ CHECK_LOCATION().AsString()));
}
return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
}
@@ -527,12 +511,10 @@ const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeD
if (nodeDef->input_size() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Identity node should have a single input! %1% has %2% inputs %3%")
- % nodeDef->name()
- % nodeDef->input_size()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Identity node should have a single input! {} has {} inputs {}",
+ nodeDef->name(),
+ nodeDef->input_size(),
+ CHECK_LOCATION().AsString()));
}
auto it = m_NodesByName.find(nodeDef->input(0));
@@ -544,11 +526,9 @@ const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeD
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Cannot find what the Identity node %1% is linked to! %2%")
- % nodeDef->name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Cannot find what the Identity node {} is linked to! {}",
+ nodeDef->name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -578,12 +558,10 @@ TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
if (inputIt == m_NodesByName.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Can't find node '%1%', which is listed as an input of '%2%' %3%")
- % nodeDef.input(j)
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Can't find node '{}', which is listed as an input of '{}' {}",
+ nodeDef.input(j),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
}
@@ -601,13 +579,11 @@ TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
if (numInputs != expectedNumInputs)
{
throw ParseException(
- boost::str(
- boost::format(
- "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
- % nodeDef.name()
- % expectedNumInputs
- % numInputs
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unexpected number of inputs for node {}. Expected {}, found {} {}",
+ nodeDef.name(),
+ expectedNumInputs,
+ numInputs,
+ CHECK_LOCATION().AsString()));
}
// Fetches the corresponding ParsedTfOperation operations
std::vector<OutputOfParsedTfOperation> result;
@@ -617,11 +593,9 @@ TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
if (it == m_ParsedTfOperations.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Node with name '%1%' has not been parsed %2%")
- % node.m_IndexedValue->name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Node with name '{}' has not been parsed {}",
+ node.m_IndexedValue->name(),
+ CHECK_LOCATION().AsString()));
}
ParsedTfOperation* parsedOp = it->second.get();
// Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
@@ -657,11 +631,10 @@ IConnectableLayer* TfParser::CreateAdditionLayer(
else
{
throw ParseException(
- boost::str(
- boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
- % layerName
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported broadcast configuration for {} operation {} {}",
+ layerName,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
@@ -737,12 +710,10 @@ ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, con
{
// should never happen
throw ParseException(
- boost::str(
- boost::format(
- "AddN Node with name '%1%' has less than two (%2) inputs %3%")
- % nodeDef.name()
- % std::to_string(numberOfInputs)
- % CHECK_LOCATION().AsString()));
+ fmt::format("AddN Node with name '{}' has less than two ({}) inputs {}",
+ nodeDef.name(),
+                        numberOfInputs,
+ CHECK_LOCATION().AsString()));
}
else if (numberOfInputs == 2)
{
@@ -942,12 +913,10 @@ DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
break;
default:
throw ParseException(
- boost::str(
- boost::format(
- "Unknown DataType %1% for node %2% %3%")
- % tensorflow::DataType_Name(tfDataType)
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unknown DataType {} for node {} {}",
+ tensorflow::DataType_Name(tfDataType),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -1072,11 +1041,9 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
if (nodeDef.attr().count("value") == 0)
{
throw ParseException(
- boost::str(
- boost::format(
- "Value not found for Const node - %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Value not found for Const node - {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
@@ -1124,11 +1091,9 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
if (numElements == 0)
{
throw ParseException(
- boost::str(
- boost::format(
- "No tensor shape found for Const node - %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("No tensor shape found for Const node - {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -1136,11 +1101,9 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
if (tensorData.empty())
{
throw ParseException(
- boost::str(
- boost::format(
- "No tensor data found for Const node - %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("No tensor data found for Const node - {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
@@ -1152,14 +1115,12 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
if (tensorData.size() > tensorInfo.GetNumBytes())
{
throw ParseException(
- boost::str(
- boost::format(
- "Number of elements (%1%) should be less than or equal "
- "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
- % (tensorData.size() / GetDataTypeSize(dataType))
- % tensorInfo.GetNumElements()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Number of elements ({}) should be less than or equal "
+ "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
+ (tensorData.size() / GetDataTypeSize(dataType)),
+ tensorInfo.GetNumElements(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
@@ -1193,10 +1154,8 @@ unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOper
}
}
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports operators with constant axis. %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports operators with constant axis. {}",
+ CHECK_LOCATION().AsString()));
}
@@ -1211,12 +1170,10 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
- % nodeDef.name()
- % inputs[1].m_IndexedValue->GetNode().name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
+ nodeDef.name(),
+ inputs[1].m_IndexedValue->GetNode().name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* weightNode =
PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
@@ -1234,11 +1191,9 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
if (dilation != 1u)
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports Convolution layers with dilations [1,1,1,1] for {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
}
@@ -1353,13 +1308,11 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports Depthwise Convolution layer with constant weights. "
- "Non const input found %1% for node %2% %3%")
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports Depthwise Convolution layer with constant weights. "
+ "Non const input found {} for node {} {}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* weightNode =
@@ -1472,12 +1425,10 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
if (inputTensorInfo.GetNumDimensions() > 4) {
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
@@ -1511,23 +1462,19 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
else
{
throw InvalidArgumentException(
- boost::str(
- boost::format(
- "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
- % expandDim
- % inputDimSize
- % CHECK_LOCATION().AsString()));
+ fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
+ expandDim,
+ inputDimSize,
+ CHECK_LOCATION().AsString()));
}
if (outputDims.size() > 4)
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
- % outputDims.size()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
+ outputDims.size(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
@@ -1566,24 +1513,22 @@ ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDe
if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
{
throw ParseException(
- fmt::format(
- "The axis parameter of ExpandDims operation given as second input is not of type int32. "
- "Input {0} Node {1} {2}",
- inputs[1].m_IndexedValue->GetNode().name(),
- nodeDef.name(),
- CHECK_LOCATION().AsString()));
+ fmt::format("The axis parameter of ExpandDims operation given as second input is not of type int32."
+ " Input {0} Node {1} {2}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
// ensure the second input is a constant value
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- fmt::format(
- "ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
- "Input {0} Node {1} {2}",
- inputs[1].m_IndexedValue->GetNode().name(),
- nodeDef.name(),
- CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
+ "Input {0} Node {1} {2}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
// make sure the second input is scalar or contains only a single value
@@ -1593,13 +1538,12 @@ ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDe
if (inputTensorInfo.GetNumElements() != 1)
{
throw ParseException(
- fmt::format(
- "The axis parameter of ExpandDims operation given as second input is not "
- "allowed to hold more than one value. "
- "Input {0} Node {1} {2}",
- inputs[1].m_IndexedValue->GetNode().name(),
- nodeDef.name(),
- CHECK_LOCATION().AsString()));
+ fmt::format("The axis parameter of ExpandDims operation given as second input is not "
+ "allowed to hold more than one value. "
+ "Input {0} Node {1} {2}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* expandDimsNode =
@@ -1633,13 +1577,11 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports FusedBatchNormalization layers with constant scale. "
- "Input %1%. Node %2% %3%")
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports FusedBatchNormalization layers with constant scale. "
+ "Input {}. Node {} {}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* scaleNode =
PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
@@ -1647,13 +1589,11 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports FusedBatchNormalization layers with constant offset. "
- "Input %1%. Node %2% %3%")
- % inputs[2].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports FusedBatchNormalization layers with constant offset. "
+ "Input {}. Node {} {}",
+ inputs[2].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* offsetNode =
PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
@@ -1661,13 +1601,11 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports FusedBatchNormalization layers with constant mean. "
- "Input %1%. Node %2% %3%")
- % inputs[3].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports FusedBatchNormalization layers with constant mean. "
+ "Input {}. Node {} {}",
+ inputs[3].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* meanNode =
PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
@@ -1675,13 +1613,11 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports FusedBatchNormalization layers with constant variance. "
- "Input %1%. Node %2% %3%")
- % inputs[4].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports FusedBatchNormalization layers with constant variance. "
+ "Input {}. Node {} {}",
+ inputs[4].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* varianceNode =
PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
@@ -1781,12 +1717,10 @@ ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
if (inputs.size() != 2)
{
throw ParseException(
- boost::str(
- boost::format(
- "Maximum expects two inputs!. Got %1% for Node %2% %3%")
- % inputs.size()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Maximum expects two inputs!. Got {} for Node {} {}",
+ inputs.size(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
@@ -1848,11 +1782,10 @@ std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwise
else
{
throw ParseException(
- boost::str(
- boost::format("Unsupported broadcast configuration for %1% operation %2% %3%")
- % layerName
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported broadcast configuration for {} operation {} {}",
+ layerName,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
return {input0Slot, input1Slot};
@@ -2040,12 +1973,10 @@ ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, co
if (numInputs < 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
- % numInputs
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Pack/Stack expects at least one input. Got {} for Node {} {}",
+ numInputs,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
@@ -2060,12 +1991,10 @@ ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, co
if (!(axis < sNumDimensions && axis >= -sNumDimensions))
{
throw ParseException(
- boost::str(
- boost::format(
- "Axis index is not in range. Got %1% for Node %2% %3%")
- % axis
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Axis index is not in range. Got {} for Node {} {}",
+ axis,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
if (axis < 0)
@@ -2088,13 +2017,11 @@ ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, co
if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
{
throw armnn::ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "Pack/Stack op. Number of dimensions should be less than %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % supportedNumDims
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions: {} for input tensors of the "
+ "Pack/Stack op. Number of dimensions should be less than {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ supportedNumDims,
+ CHECK_LOCATION().AsString()));
}
}
@@ -2132,13 +2059,11 @@ ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef
if (inputCount != 2)
{
throw ParseException(
- boost::str(
- boost::format(
- "The number of given input is %1%. It should be two for Transpose op."
- "Node %2% %3%")
- % inputCount
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of given input is {}. It should be two for Transpose op."
+ "Node {} {}",
+ inputCount,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2178,25 +2103,22 @@ unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
if (rank != expectedRank)
{
throw ParseException(
- boost::str(
- boost::format(
- "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
- % expectedRank
- % rank
- % nodeName
- % CHECK_LOCATION().AsString()));
+ fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
+ expectedRank,
+ rank,
+ nodeName,
+ CHECK_LOCATION().AsString()));
}
unsigned int second = paddingTensor.GetShape()[1];
if (second != 2)
{
throw ParseException(
- boost::str(
- boost::format(
- "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
- % rank
- % second
- % nodeName
- % CHECK_LOCATION().AsString()));
+ fmt::format("Expected the padding tensor to be of dimensions "
+ "[{1}, 2] not [{1}, {2}] on Node {3} {4}.",
+ rank,
+ second,
+ nodeName,
+ CHECK_LOCATION().AsString()));
}
return rank;
}
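Unlike boost::format, where positional placeholders start at %1%, fmt's manual indexing is zero-based; with the four arguments passed above, the valid indices are {0} through {3}, which is why the dimensions message uses {0}..{3} rather than a one-based numbering. A small sketch of the zero-based, reusable indices (assuming fmt as above):

// Sketch: fmt positional indices are zero-based and may be repeated.
#include <fmt/format.h>
#include <cassert>
#include <string>

int main()
{
    // {0} appears twice; both occurrences print the first argument.
    const std::string s = fmt::format("[{0}, 2] not [{0}, {1}]", 2u, 5u);
    assert(s == "[2, 2] not [2, 5]");
    // With four arguments, referencing {4} would be a format error,
    // since only {0}..{3} exist.
    return 0;
}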
@@ -2233,13 +2155,11 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports Pad with constant padding. "
- "Input %1%. Node %2% %3%")
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports Pad with constant padding. "
+ "Input {}. Node {} {}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* paddingTensorOp =
@@ -2266,14 +2186,12 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
if (paddingAmount < 0)
{
throw ParseException(
- boost::str(
- boost::format(
- "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
- % paddingAmount
- % i
- % j
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
+ paddingAmount,
+ i,
+ j,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
if (j == 0)
{
@@ -2322,13 +2240,11 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
if (concatDim == 0 || concatDim == 2)
{
throw ParseException(
- boost::str(
- boost::format(
- "Dimension %1% for concatenation is not supported by Armnn. "
- "Node %2% %3%")
- % concatDim
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Dimension {} for concatenation is not supported by Armnn. "
+ "Node {} {}",
+ concatDim,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const unsigned int supportedNumDims = 4;
@@ -2347,13 +2263,11 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
{
throw armnn::ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "concatenation op should be %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % supportedNumDims
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions: {} for input tensors of the "
+ "concatenation op should be {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ supportedNumDims,
+ CHECK_LOCATION().AsString()));
}
// Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
@@ -2395,12 +2309,10 @@ ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
if (tfDataType != tensorflow::DT_INT32)
{
throw ParseException(
- boost::str(
- boost::format(
- "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
- % tensorflow::DataType_Name(tfDataType)
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
+ tensorflow::DataType_Name(tfDataType),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2434,13 +2346,11 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports Reshape layers with constant shapes. "
- "Input %1% Node %2% %3%")
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports Reshape layers with constant shapes. "
+ "Input {} Node {} {}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* shapeNode =
PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
@@ -2472,13 +2382,11 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports ResizeBilinear layers with constant sizes. "
- "Input %1%. Node %2% %3%")
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports ResizeBilinear layers with constant sizes. "
+ "Input {}. Node {} {}",
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* sizeNode =
PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
@@ -2487,12 +2395,10 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
- "Node %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports ResizeBilinear layers with align_corners set to false. "
+ "Node {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
// Data for the parsed tensor args (size) must be stored locally.
@@ -2543,23 +2449,20 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
else
{
throw ParseException(
- boost::str(
- boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
- % tensorflow::DataType_Name(tfDataType)
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
+ tensorflow::DataType_Name(tfDataType),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
if (inputTensorInfo.GetNumDimensions() > 4)
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
@@ -2585,12 +2488,10 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
if (outputDims.size() > 4)
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
- % outputDims.size()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
+ outputDims.size(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
@@ -2687,10 +2588,10 @@ ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, con
if (inputs.size() != 2)
{
throw ParseException(
- boost::str(boost::format("Mean expects two inputs!. Got %1% for Node %2% %3%")
- % inputs.size()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Mean expects two inputs!. Got {} for Node {} {}",
+ inputs.size(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
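Every converted throw site in this file has the same tail: a fmt::format message followed by CHECK_LOCATION().AsString() as the last argument. If further de-duplication were ever wanted, that pattern could be wrapped once; the helper below is only an illustrative sketch, not part of this patch (ThrowParseException is a hypothetical name), and it assumes the fmt 7.x era behaviour of accepting runtime format strings:

// Hypothetical helper (not in this patch) wrapping the repeated throw pattern.
#include <fmt/format.h>
#include <string>
#include <utility>
#include <armnn/Exceptions.hpp>

template <typename... Args>
[[noreturn]] void ThrowParseException(const std::string& location,
                                      const char* formatStr,
                                      Args&&... args)
{
    // CHECK_LOCATION() must still be expanded at the call site so the
    // reported file/line point at the throw, not at this helper.
    throw armnn::ParseException(
        fmt::format(formatStr, std::forward<Args>(args)...) + " " + location);
}

// Usage sketch:
//   ThrowParseException(CHECK_LOCATION().AsString(),
//                       "Mean expects two inputs. Got {} for Node {}",
//                       inputs.size(), nodeDef.name());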
@@ -2772,11 +2673,9 @@ ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeD
if (it == m_InputShapes.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Missing input shape for Placeholder '%1%' %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Missing input shape for Placeholder '{}' {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
TensorInfo tensorInfo(it->second, DataType::Float32);
@@ -2888,13 +2787,11 @@ ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
if (splitDim == 0 || splitDim == 2)
{
throw armnn::ParseException(
- boost::str(
- boost::format(
- "Dimension %1% for split is not supported by Armnn. "
- "Node %2% %3%")
- % splitDim
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Dimension {} for split is not supported by Armnn. "
+ "Node {} {}",
+ splitDim,
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
    // As Armnn only supports splitter outputs of the same shape, num_split will be limited to an integer.
@@ -2909,13 +2806,11 @@ ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
if (inputDimSize != supportedNumDims)
{
throw armnn::ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "split op should be %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % supportedNumDims
- % CHECK_LOCATION().AsString()));
+ fmt::format("The number of dimensions: {} for input tensors of the "
+ "split op should be {} {}",
+ inputTensorInfo.GetNumDimensions(),
+ supportedNumDims,
+ CHECK_LOCATION().AsString()));
}
std::vector<unsigned int> splitterDimSizes(inputDimSize);
@@ -3066,12 +2961,10 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
if (inputs.size() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "2D Pooling expects one input!. Got %1% for Node %2% %3%")
- % inputs.size()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("2D Pooling expects one input!. Got {} for Node {} {}",
+ inputs.size(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
@@ -3153,11 +3046,9 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
if (layer == nullptr)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to add pooling2d layer for %1% %2%")
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Failed to add pooling2d layer for {} {}",
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
@@ -3184,14 +3075,12 @@ ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeD
if(input1Info.GetNumDimensions() != 1)
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported bias for BiasAdd. It should be a 1D vector. "
- "Got %1% dimensions for input %2%. Node %3% %4%")
- % input1Info.GetNumDimensions()
- % inputs[1].m_IndexedValue->GetNode().name()
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported bias for BiasAdd. It should be a 1D vector. "
+ "Got {} dimensions for input {}. Node {} {}",
+ input1Info.GetNumDimensions(),
+ inputs[1].m_IndexedValue->GetNode().name(),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
@@ -3386,15 +3275,13 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
else
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports fully connected layers with constant bias. "
- "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
- % addInputs[0].m_IndexedValue->GetNode().name()
- % addInputs[1].m_IndexedValue->GetNode().name()
- % addNodeDef->name()
- % matMulNodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports fully connected layers with constant bias. "
+ "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
+ addInputs[0].m_IndexedValue->GetNode().name(),
+ addInputs[1].m_IndexedValue->GetNode().name(),
+ addNodeDef->name(),
+ matMulNodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
}
@@ -3418,14 +3305,12 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
else
{
throw ParseException(
- boost::str(
- boost::format(
- "ArmNN only supports fully connected layers with constant weights. "
- "Inputs %1% and %2%. MatMulNode %3% %4%")
- % mulInputs[0].m_IndexedValue->GetNode().name()
- % mulInputs[1].m_IndexedValue->GetNode().name()
- % matMulNodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("ArmNN only supports fully connected layers with constant weights. "
+ "Inputs {} and {}. MatMulNode {} {}",
+ mulInputs[0].m_IndexedValue->GetNode().name(),
+ mulInputs[1].m_IndexedValue->GetNode().name(),
+ matMulNodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
std::vector<float> weightTensorData;
@@ -3446,13 +3331,11 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
if (weights.GetShape()[1] != biases.GetShape()[0])
{
throw ParseException(
- boost::str(
- boost::format(
- "Shape of matmul weights and bias do not match. "
- "AddNode %1%. MatMulNode %2% %3%")
- % addNodeDef->name()
- % matMulNodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Shape of matmul weights and bias do not match. "
+ "AddNode {}. MatMulNode {} {}",
+ addNodeDef->name(),
+ matMulNodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
optionalBiases = Optional<ConstTensor>(biases);
@@ -3488,13 +3371,11 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
{
throw ParseException(
- boost::str(
- boost::format(
- "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
- "Got %1% for Node %2% %3%")
- % tensorflow::DataType_Name(type)
- % nodeDef.name()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
+ "Got {} for Node {} {}",
+ tensorflow::DataType_Name(type),
+ nodeDef.name(),
+ CHECK_LOCATION().AsString()));
}
const std::string& operation = nodeDef.op();
@@ -3515,7 +3396,7 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
auto it = m_ParsedTfOperations.find(nodeDef.name());
if (it != m_ParsedTfOperations.end())
{
- throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
+ throw ParseException(fmt::format("Name {} used by more than one node", nodeDef.name()));
}
m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
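fmt::format can only print types it has formatters for; a value that merely provides an ostream operator<< needs either fmt/ostream.h or a std::stringstream. A sketch of the stringstream fallback, with Widget as a hypothetical ostream-only type:

// Sketch: std::stringstream as the fallback for ostream-only types.
#include <ostream>
#include <sstream>
#include <string>

struct Widget // hypothetical type with an inserter but no fmt formatter
{
    int id;
};

std::ostream& operator<<(std::ostream& os, const Widget& w)
{
    return os << "Widget#" << w.id;
}

std::string DescribeDuplicate(const Widget& w)
{
    // fmt::format would need a fmt::formatter<Widget> specialisation;
    // the stringstream reuses the existing operator<< directly.
    std::stringstream ss;
    ss << "Name " << w << " used by more than one node";
    return ss.str();
}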
@@ -3539,11 +3420,9 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Unsupported operation %1% in tensorflow::GraphDef %2%")
- % operation
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unsupported operation {} in tensorflow::GraphDef {}",
+ operation,
+ CHECK_LOCATION().AsString()));
}
}
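The {} placeholders in these messages are only validated when the format call runs; fmt also offers the FMT_STRING macro, which moves that check to compile time. A sketch, assuming a fmt version that provides FMT_STRING (available since well before this patch):

// Sketch: compile-time format-string checking with FMT_STRING.
#include <fmt/format.h>
#include <string>

std::string DescribeUnsupportedOp(const std::string& operation)
{
    // A placeholder/argument mismatch here would fail to compile
    // instead of throwing fmt::format_error at run time.
    return fmt::format(FMT_STRING("Unsupported operation {} in tensorflow::GraphDef"),
                       operation);
}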
@@ -3568,11 +3447,9 @@ void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
if (nodeIt == m_NodesByName.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Couldn't find requested input node '%1%' in graph %2%")
- % requestedInputName
- % CHECK_LOCATION().AsString()));
+ fmt::format("Couldn't find requested input node '{}' in graph {}",
+ requestedInputName,
+ CHECK_LOCATION().AsString()));
}
}
@@ -3584,11 +3461,9 @@ void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
if (nodeIt == m_NodesByName.end())
{
throw ParseException(
- boost::str(
- boost::format(
- "Couldn't find requested output node '%1%' in graph %2%")
- % requestedOutputName
- % CHECK_LOCATION().AsString()));
+ fmt::format("Couldn't find requested output node '{}' in graph {}",
+ requestedOutputName,
+ CHECK_LOCATION().AsString()));
}
targetNodes.push_back(nodeIt->second);
}
@@ -3609,10 +3484,8 @@ void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
sortedNodes))
{
throw ParseException(
- boost::str(
- boost::format(
- "Cycle detected in graph %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("Cycle detected in graph {}",
+ CHECK_LOCATION().AsString()));
}
// Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
@@ -3632,11 +3505,9 @@ INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
if (fd == nullptr)
{
throw FileNotFoundException(
- boost::str(
- boost::format(
- "Graph file %1% failed to open %2%")
- % graphFile
- % CHECK_LOCATION().AsString()));
+ fmt::format("Graph file {} failed to open {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
// Parses the file into a message.
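For context, the parse step that follows relies on protobuf's text-format reader over the opened FILE handle. A condensed, standalone sketch of that flow (exception handling from the hunks above elided; header paths assumed to match a standard TensorFlow/protobuf checkout, and fileno is POSIX):

// Sketch of the text-format parse flow behind CreateNetworkFromTextFile.
#include <cstdio>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
#include "tensorflow/core/framework/graph.pb.h"

bool LoadTextGraphDef(const char* graphFile, tensorflow::GraphDef& graphDef)
{
    std::FILE* fd = std::fopen(graphFile, "r");
    if (fd == nullptr)
    {
        return false; // the parser throws FileNotFoundException instead
    }
    google::protobuf::io::FileInputStream input(fileno(fd));
    const bool success = google::protobuf::TextFormat::Parse(&input, &graphDef);
    std::fclose(fd);
    return success;
}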
@@ -3649,10 +3520,8 @@ INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse graph file %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse graph file {}",
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -3669,10 +3538,8 @@ INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse graph file %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse graph file {}",
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -3687,11 +3554,9 @@ INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (fd == nullptr)
{
throw FileNotFoundException(
- boost::str(
- boost::format(
- "Graph file %1% failed to open %2%")
- % graphFile
- % CHECK_LOCATION().AsString()));
+ fmt::format("Graph file {} failed to open {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
// Parses the file into a message.
@@ -3706,11 +3571,9 @@ INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (!success)
{
throw ParseException(
- boost::str(
- boost::format(
- "Failed to parse protobuf file %1% %2%")
- % graphFile
- % CHECK_LOCATION().AsString()));
+ fmt::format("Failed to parse protobuf file {} {}",
+ graphFile,
+ CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -3726,10 +3589,8 @@ INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& grap
if (requestedOutputs.size() == 0)
{
throw ParseException(
- boost::str(
- boost::format(
- "requestedOutputs must have at least one entry %1%")
- % CHECK_LOCATION().AsString()));
+ fmt::format("requestedOutputs must have at least one entry {}",
+ CHECK_LOCATION().AsString()));
}
m_RequestedOutputs = requestedOutputs;
@@ -3775,12 +3636,10 @@ std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string
if (it == nameToBindingInfo.end())
{
throw InvalidArgumentException(
- boost::str(
- boost::format(
- "Unknown %1% '%2%' %3%")
- % bindingPointDesc
- % layerName
- % CHECK_LOCATION().AsString()));
+ fmt::format("Unknown {} '{}' {}",
+ bindingPointDesc,
+ layerName,
+ CHECK_LOCATION().AsString()));
}
return it->second;
}
@@ -3810,12 +3669,10 @@ void TfParser::TrackBindingPoint(IConnectableLayer* layer,
else
{
throw ParseException(
- boost::str(
- boost::format(
- "Id %1% used by more than one %2% layer %3%")
- % id
- % bindingPointDesc
- % CHECK_LOCATION().AsString()));
+ fmt::format("Id {} used by more than one {} layer {}",
+ id,
+ bindingPointDesc,
+ CHECK_LOCATION().AsString()));
}
}