aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorColm Donelan <Colm.Donelan@arm.com>2020-09-09 12:48:16 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2020-10-02 15:30:11 +0000
commit5b5c222f6b0c40a8e0f9ef9dedccd6f0f18c4c2c (patch)
tree6ad0552e5efeb7ae3474a7f5019bc2fb33fced03 /src
parent24ac85943b609e48fc36d16570ca4b5b90d31a6a (diff)
downloadarmnn-5b5c222f6b0c40a8e0f9ef9dedccd6f0f18c4c2c.tar.gz
IVGCVSW-5297 Remove boost::format from rest of ArmNN.
* Replacing calls to boost::format with fmt::format. * TensorUtils.cpp added outputShape.reserve call. Signed-off-by: Colm Donelan <Colm.Donelan@arm.com> Change-Id: I4b2ed0f72039df824a2adca9309b8a9bbb158c5b
Diffstat (limited to 'src')
-rw-r--r--src/armnn/Graph.cpp13
-rw-r--r--src/armnn/Layer.cpp37
-rw-r--r--src/armnn/LoadedNetwork.cpp26
-rw-r--r--src/armnnConverter/ArmnnConverter.cpp20
-rw-r--r--src/armnnDeserializer/Deserializer.cpp203
-rw-r--r--src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp16
-rw-r--r--src/armnnUtils/ParserHelper.cpp16
-rw-r--r--src/armnnUtils/ParserPrototxtFixture.hpp57
-rw-r--r--src/armnnUtils/TensorIOUtils.hpp25
-rw-r--r--src/armnnUtils/TensorUtils.cpp12
-rw-r--r--src/armnnUtils/VerificationHelpers.cpp36
11 files changed, 204 insertions, 257 deletions
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index a497a45da9..30546e1f0a 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -16,7 +16,7 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <unordered_map>
#include <DotSerializer.hpp>
@@ -312,12 +312,11 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
// A copy layer is needed in between the source and destination layers.
// Record the operation rather than attempting to modify the graph as we go.
// (invalidating iterators)
- const std::string compLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]")
- % srcLayer->GetName()
- % srcOutputIndex
- % dstLayer.GetName()
- % dstInputSlot->GetSlotIndex());
-
+ const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
+ srcLayer->GetName(),
+ srcOutputIndex,
+ dstLayer.GetName(),
+ dstInputSlot->GetSlotIndex());
Layer* compLayer = nullptr;
if (strategy == EdgeStrategy::CopyToTarget)
{
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index c8d5a1690d..d06b0459f6 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -10,7 +10,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <numeric>
@@ -159,8 +159,7 @@ void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
{
- throw InvalidArgumentException(
- boost::str(boost::format("GetConnection: Invalid index %1% provided") % index));
+ throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
}
}
@@ -350,14 +349,12 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
if (GetInputSlot(i).GetConnection() == nullptr)
{
throw LayerValidationException(
- boost::str(
- boost::format(
- "Input connection #%1% must be connected "
- "for %2% layer %3% %4%")
- % i
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % location.AsString()));
+ fmt::format("Input connection #{0} must be connected "
+ "for {1} layer {2} {3}",
+ i,
+ GetLayerTypeAsCString(this->GetType()),
+ GetNameStr(),
+ location.AsString()));
}
}
}
@@ -375,16 +372,14 @@ std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>
if (GetNumInputSlots() != GetNumOutputSlots())
{
throw UnimplementedException(
- boost::str(
- boost::format(
- "Default implementation for InferOutputShapes can only be used for "
- "layers with the same number of input and output slots. This doesn't "
- "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % GetNumInputSlots()
- % GetNumOutputSlots()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Default implementation for InferOutputShapes can only be used for "
+ "layers with the same number of input and output slots. This doesn't "
+ "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
+ GetLayerTypeAsCString(this->GetType()),
+ GetNameStr(),
+ GetNumInputSlots(),
+ GetNumOutputSlots(),
+ CHECK_LOCATION().AsString()));
}
return inputShapes;
}
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 33625744c5..00ac90b121 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -8,7 +8,6 @@
#include "Graph.hpp"
#include "Network.hpp"
#include <Processes.hpp>
-#include "Runtime.hpp"
#include "Profiling.hpp"
#include "HeapProfiling.hpp"
@@ -23,7 +22,7 @@
#include <LabelsAndEventClasses.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
namespace armnn
{
@@ -236,9 +235,9 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
{
const char* const layerName =
layer->GetNameStr().length() != 0 ? layer->GetName() : "<Unnamed>";
- throw InvalidArgumentException(boost::str(
- boost::format("No workload created for layer (name: '%1%' type: '%2%') (compute '%3%')")
- % layerName % static_cast<int>(layer->GetType()) % layer->GetBackendId().Get()
+ throw InvalidArgumentException(
+ fmt::format("No workload created for layer (name: '{0}' type: '{1}') (compute '{2}')",
+ layerName, static_cast<int>(layer->GetType()), layer->GetBackendId().Get()
));
}
@@ -325,7 +324,7 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
}
}
- throw InvalidArgumentException(boost::str(boost::format("No input layer is associated with id %1%") % layerId));
+ throw InvalidArgumentException(fmt::format("No input layer is associated with id {}", layerId));
}
TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
@@ -340,7 +339,7 @@ TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
}
}
- throw InvalidArgumentException(boost::str(boost::format("No output layer is associated with id %1%") % layerId));
+ throw InvalidArgumentException(fmt::format("No output layer is associated with id {}", layerId));
}
const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) const
@@ -350,12 +349,10 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
auto it = m_WorkloadFactories.find(layer.GetBackendId());
if (it == m_WorkloadFactories.end())
{
- throw RuntimeException(
- boost::str(
- boost::format("No workload factory for %1% to be used for layer: %2%")
- % layer.GetBackendId().Get()
- % layer.GetNameStr()),
- CHECK_LOCATION());
+ throw RuntimeException(fmt::format("No workload factory for {0} to be used for layer: {1}",
+ layer.GetBackendId().Get(),
+ layer.GetNameStr()),
+ CHECK_LOCATION());
}
workloadFactory = it->second.first.get();
@@ -411,8 +408,7 @@ static const TensorPin& GetTensorPin(LayerBindingId id,
}
else
{
- throw InvalidArgumentException(boost::str(
- boost::format("No tensor supplied for %1% %2%") % bindingPointDesc % id));
+ throw InvalidArgumentException(fmt::format("No tensor supplied for {0} {1}", bindingPointDesc, id));
}
}
diff --git a/src/armnnConverter/ArmnnConverter.cpp b/src/armnnConverter/ArmnnConverter.cpp
index 21b89ea6f8..c8b9ba9f28 100644
--- a/src/armnnConverter/ArmnnConverter.cpp
+++ b/src/armnnConverter/ArmnnConverter.cpp
@@ -24,7 +24,7 @@
#include <armnn/utility/NumericCast.hpp>
#include "armnn/utility/StringUtils.hpp"
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <boost/program_options.hpp>
#include <cstdlib>
@@ -265,9 +265,9 @@ private:
const size_t numInputBindings = m_InputNames.size();
if (numInputShapes < numInputBindings)
{
- throw armnn::Exception(boost::str(boost::format(
- "Not every input has its tensor shape specified: expected=%1%, got=%2%")
- % numInputBindings % numInputShapes));
+ throw armnn::Exception(fmt::format(
+ "Not every input has its tensor shape specified: expected={0}, got={1}",
+ numInputBindings, numInputShapes));
}
for (size_t i = 0; i < numInputShapes; i++)
@@ -298,9 +298,9 @@ private:
const size_t numInputBindings = m_InputNames.size();
if (numInputShapes < numInputBindings)
{
- throw armnn::Exception(boost::str(boost::format(
- "Not every input has its tensor shape specified: expected=%1%, got=%2%")
- % numInputBindings % numInputShapes));
+ throw armnn::Exception(fmt::format(
+ "Not every input has its tensor shape specified: expected={0}, got={1}",
+ numInputBindings, numInputShapes));
}
}
@@ -325,9 +325,9 @@ private:
const size_t numInputBindings = m_InputNames.size();
if (numInputShapes < numInputBindings)
{
- throw armnn::Exception(boost::str(boost::format(
- "Not every input has its tensor shape specified: expected=%1%, got=%2%")
- % numInputBindings % numInputShapes));
+ throw armnn::Exception(fmt::format(
+ "Not every input has its tensor shape specified: expected={0}, got={1}",
+ numInputBindings, numInputShapes));
}
}
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 4c2f2f1397..1b62484d3e 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -20,7 +20,7 @@
#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <fstream>
#include <algorithm>
@@ -45,24 +45,19 @@ const uint32_t VIRTUAL_LAYER_ID = std::numeric_limits<uint32_t>::max();
{
if (graph->layers() == nullptr)
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with invalid (null) graph. "
- "Possible reason is that the graph is not yet loaded and Unpack(ed). "
- "layers:%2% at %3%") %
- location.m_Function %
- layersIndex %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with invalid (null) graph. "
+ "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+ "layers:{1} at {2}",
+ location.m_Function,
+ layersIndex,
+ location.FileLine()));
}
else if (layersIndex >= graph->layers()->size())
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid layers index. "
- "layers:%2% at %3%") %
- location.m_Function %
- layersIndex %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with an invalid layers index. layers:{1} at {2}",
+ location.m_Function,
+ layersIndex,
+ location.FileLine()));
}
}
@@ -73,36 +68,30 @@ void CheckLayers(const Deserializer::GraphPtr& graph,
{
if (graph->layers() == nullptr)
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with invalid (null) graph. "
- "Possible reason is that the graph is not yet loaded and Unpack(ed). "
- "layers:%2% at %3%") %
- location.m_Function %
- layersIndex %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with invalid (null) graph. "
+ "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+ "layers:{1} at {2}",
+ location.m_Function,
+ layersIndex,
+ location.FileLine()));
}
else if (layersIndex >= graph->layers()->size())
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid layers index. "
- "layers:%2% at %3%") %
- location.m_Function %
- layersIndex %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with an invalid layers index. "
+ "layers:{1} at {2}",
+ location.m_Function,
+ layersIndex,
+ location.FileLine()));
}
else if (layerIndex >= graph->layers()[layersIndex].size()
&& layerIndex != VIRTUAL_LAYER_ID)
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with an invalid layer index. "
- "layers:%2% layer:%3% at %4%") %
- location.m_Function %
- layersIndex %
- layerIndex %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with an invalid layer index. "
+ "layers:{1} layer:{2} at {3}",
+ location.m_Function,
+ layersIndex,
+ layerIndex,
+ location.FileLine()));
}
}
@@ -111,13 +100,9 @@ void CheckTensorPtr(Deserializer::TensorRawPtr rawPtr,
{
if (rawPtr == nullptr)
{
- throw ParseException(
- boost::str(
- boost::format("%1% was called with a null tensor pointer. "
- "at %2%") %
- location.m_Function %
- location.FileLine()));
-
+ throw ParseException(fmt::format("{0} was called with a null tensor pointer. at {1}",
+ location.m_Function,
+ location.FileLine()));
}
}
@@ -126,9 +111,9 @@ void CheckConstTensorPtr(Deserializer::ConstTensorRawPtr rawPtr,
{
if (rawPtr == nullptr)
{
- throw ParseException(boost::str(boost::format("%1% was called with a null const tensor pointer. at %2%") %
- location.m_Function %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} was called with a null const tensor pointer. at {1}",
+ location.m_Function,
+ location.FileLine()));
}
}
@@ -138,9 +123,9 @@ void CheckConstTensorSize(const unsigned int constTensorSize,
{
if (constTensorSize != tensorSize)
{
- throw ParseException(boost::str(boost::format("%1% wrong number of components supplied to tensor. at:%2%") %
- location.m_Function %
- location.FileLine()));
+ throw ParseException(fmt::format("{0} wrong number of components supplied to tensor. at:{1}",
+ location.m_Function,
+ location.FileLine()));
}
}
@@ -368,9 +353,7 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_TransposeLayer()->base();
case Layer::Layer_NONE:
default:
- throw ParseException(boost::str(
- boost::format("Layer type %1% not recognized") %
- layerType));
+ throw ParseException(fmt::format("Layer type {} not recognized", layerType));
}
}
@@ -539,12 +522,10 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
default:
{
CheckLocation location = CHECK_LOCATION();
- throw ParseException(
- boost::str(
- boost::format("Unsupported data type %1% = %2%. %3%") %
- tensorPtr->dataType() %
- EnumNameDataType(tensorPtr->dataType()) %
- location.AsString()));
+ throw ParseException(fmt::format("Unsupported data type {0} = {1}. {2}",
+ tensorPtr->dataType(),
+ EnumNameDataType(tensorPtr->dataType()),
+ location.AsString()));
}
}
@@ -624,11 +605,10 @@ armnn::ConstTensor ToConstTensor(Deserializer::ConstTensorRawPtr constTensorPtr)
default:
{
CheckLocation location = CHECK_LOCATION();
- throw ParseException(
- boost::str(boost::format("Unsupported data type %1% = %2%. %3%") %
- constTensorPtr->data_type() %
- EnumNameConstTensorData(constTensorPtr->data_type()) %
- location.AsString()));
+ throw ParseException(fmt::format("Unsupported data type {0} = {1}. {2}",
+ constTensorPtr->data_type(),
+ EnumNameConstTensorData(constTensorPtr->data_type()),
+ location.AsString()));
}
}
}
@@ -671,14 +651,11 @@ void Deserializer::ParseUnsupportedLayer(GraphPtr graph, unsigned int layerIndex
{
CHECK_LAYERS(graph, 0, layerIndex);
const auto layerName = GetBaseLayer(graph, layerIndex)->layerName()->c_str();
- throw ParseException(
- boost::str(
- boost::format("Layer not supported. "
- "layerIndex: %1% "
- "layerName: %2% / %3%") %
- layerIndex %
- layerName %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Layer not supported. layerIndex: {0} "
+ "layerName: {1} / {2}",
+ layerIndex,
+ layerName,
+ CHECK_LOCATION().AsString()));
}
void Deserializer::ResetParser()
@@ -722,17 +699,16 @@ Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryCo
{
if (binaryContent == nullptr)
{
- throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
- CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
+ CHECK_LOCATION().AsString()));
}
flatbuffers::Verifier verifier(binaryContent, len);
if (verifier.VerifyBuffer<SerializedGraph>() == false)
{
- throw ParseException(
- boost::str(boost::format("Buffer doesn't conform to the expected Armnn "
- "flatbuffers format. size:%1% %2%") %
- len %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Buffer doesn't conform to the expected Armnn "
+ "flatbuffers format. size:{0} {1}",
+ len,
+ CHECK_LOCATION().AsString()));
}
return GetSerializedGraph(binaryContent);
}
@@ -789,11 +765,9 @@ BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerInde
return inputBinding.second;
}
}
- throw ParseException(
- boost::str(
- boost::format("No input binding found for layer:%1% / %2%") %
- name %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("No input binding found for layer:{0} / {1}",
+ name,
+ CHECK_LOCATION().AsString()));
}
BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
@@ -807,11 +781,9 @@ BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerInd
return outputBinding.second;
}
}
- throw ParseException(
- boost::str(
- boost::format("No output binding found for layer:%1% / %2%") %
- name %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("No output binding found for layer:{0} / {1}",
+ name,
+ CHECK_LOCATION().AsString()));
}
unsigned int Deserializer::GetInputLayerInVector(GraphPtr graph, int targetId)
@@ -963,13 +935,12 @@ void Deserializer::RegisterOutputSlots(GraphPtr graph,
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->outputSlots()->size() != layer->GetNumOutputSlots())
{
- throw ParseException(
- boost::str(boost::format("The number of outputslots (%1%) does not match the number expected (%2%)"
- " for layer index: %3% %4%") %
- baseLayer->outputSlots()->size() %
- layer->GetNumOutputSlots() %
- layerIndex %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The number of outputslots ({0}) does not match the number expected ({1})"
+ " for layer index: {2} {3}",
+ baseLayer->outputSlots()->size(),
+ layer->GetNumOutputSlots(),
+ layerIndex,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
@@ -990,13 +961,12 @@ void Deserializer::RegisterInputSlots(GraphPtr graph,
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->inputSlots()->size() != layer->GetNumInputSlots())
{
- throw ParseException(
- boost::str(boost::format("The number of inputslots (%1%) does not match the number expected (%2%)"
- " for layer index:%3% %4%") %
- baseLayer->inputSlots()->size() %
- layer->GetNumInputSlots() %
- layerIndex %
- CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The number of inputslots ({0}) does not match the number expected ({1})"
+ " for layer index:{2} {3}",
+ baseLayer->inputSlots()->size(),
+ layer->GetNumInputSlots(),
+ layerIndex,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -1157,8 +1127,7 @@ void Deserializer::ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex)
if (flatBufferCrops->Length() % 2 != 0)
{
- throw ParseException(boost::str(
- boost::format("The size of crops must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The size of crops must be divisible by 2 {}", CHECK_LOCATION().AsString()));
}
std::vector<std::pair<unsigned int, unsigned int>> crops;
@@ -1814,8 +1783,8 @@ void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex)
if (flatBufferPadList->Length() % 2 != 0)
{
- throw ParseException(boost::str(
- boost::format("The size of the pad list must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The size of the pad list must be divisible by 2 {}",
+ CHECK_LOCATION().AsString()));
}
std::vector<std::pair<unsigned int, unsigned int>> padList;
@@ -2001,8 +1970,8 @@ armnn::TensorInfo Deserializer::OutputShapeOfReshape(const armnn::TensorInfo& in
{
if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
{
- throw ParseException(boost::str(
- boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("At most one component of shape can be -1 {}",
+ CHECK_LOCATION().AsString()));
}
auto targetNumElements =
@@ -2183,8 +2152,8 @@ void Deserializer::ParseSpaceToBatchNd(GraphPtr graph, unsigned int layerIndex)
if (flatBufferPadList->Length() % 2 != 0)
{
- throw ParseException(boost::str(
- boost::format("The size of the pad list must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The size of the pad list must be divisible by 2 {}",
+ CHECK_LOCATION().AsString()));
}
std::vector<std::pair<unsigned int, unsigned int>> padList;
@@ -2367,8 +2336,8 @@ void Deserializer::ParseSlice(GraphPtr graph, unsigned int layerIndex)
if (fbBegin->Length() != fbSize->Length())
{
- throw ParseException(boost::str(
- boost::format("Begin and size descriptors must have the same length %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("Begin and size descriptors must have the same length {}",
+ CHECK_LOCATION().AsString()));
}
armnn::SliceDescriptor descriptor;
@@ -2404,8 +2373,8 @@ void Deserializer::ParseStridedSlice(GraphPtr graph, unsigned int layerIndex)
if (!(flatBufferBegin->Length() == flatBufferEnd->Length() &&
flatBufferBegin->Length() == flatBufferStride->Length()))
{
- throw ParseException(boost::str(
- boost::format("The size of the begin, end, and stride must be equal %1%") % CHECK_LOCATION().AsString()));
+ throw ParseException(fmt::format("The size of the begin, end, and stride must be equal {}",
+ CHECK_LOCATION().AsString()));
}
std::vector<int> begin(flatBufferBegin->begin(), flatBufferBegin->end());
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index bb38d5f4b4..cea6a43454 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -18,7 +18,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <ResolveType.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
using armnnDeserializer::IDeserializer;
@@ -68,14 +68,12 @@ struct ParserFlatbuffersSerializeFixture
if (ret != armnn::Status::Success)
{
- throw armnn::Exception(
- boost::str(
- boost::format("The runtime failed to load the network. "
- "Error was: %1%. in %2% [%3%:%4%]") %
- errorMessage %
- __func__ %
- __FILE__ %
- __LINE__));
+ throw armnn::Exception(fmt::format("The runtime failed to load the network. "
+ "Error was: {0}. in {1} [{2}:{3}]",
+ errorMessage,
+ __func__,
+ __FILE__,
+ __LINE__));
}
}
diff --git a/src/armnnUtils/ParserHelper.cpp b/src/armnnUtils/ParserHelper.cpp
index 9709773014..af8014d112 100644
--- a/src/armnnUtils/ParserHelper.cpp
+++ b/src/armnnUtils/ParserHelper.cpp
@@ -8,7 +8,7 @@
#include <armnn/Descriptors.hpp>
#include <armnnUtils/Permute.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
namespace armnnUtils
{
@@ -27,14 +27,12 @@ void ProcessConcatInputTensorInfo(armnn::TensorInfo& inputTensorInfo,
// double check dimensions of the tensors
if (inputTensorInfo.GetNumDimensions() != inputRank)
{
- throw armnn::ParseException(
- boost::str(
- boost::format(
- "The number of dimensions: %1% for input tensors of the "
- "concatenation op should be %2% %3%")
- % inputTensorInfo.GetNumDimensions()
- % inputRank
- % CHECK_LOCATION().AsString()));
+ throw armnn::ParseException(fmt::format(
+ "The number of dimensions: {0} for input tensors of the "
+ "concatenation op should be {1} {2}",
+ inputTensorInfo.GetNumDimensions(),
+ inputRank,
+ CHECK_LOCATION().AsString()));
}
for (unsigned int j = 0; j < concatAxis; ++j)
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 8356117e1f..cf28fcf513 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -6,13 +6,13 @@
#pragma once
#include <armnn/IRuntime.hpp>
-
#include <test/TensorHelpers.hpp>
#include <Network.hpp>
#include <VerificationHelpers.hpp>
#include <boost/format.hpp>
+#include <fmt/format.h>
#include <iomanip>
#include <string>
@@ -129,10 +129,9 @@ void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::Te
armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
- throw armnn::Exception(boost::str(
- boost::format("LoadNetwork failed with error: '%1%' %2%")
- % errorMessage
- % CHECK_LOCATION().AsString()));
+ throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
+ errorMessage,
+ CHECK_LOCATION().AsString()));
}
}
@@ -147,10 +146,9 @@ void ParserPrototxtFixture<TParser>::Setup()
armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
if (ret != armnn::Status::Success)
{
- throw armnn::Exception(boost::str(
- boost::format("LoadNetwork failed with error: '%1%' %2%")
- % errorMessage
- % CHECK_LOCATION().AsString()));
+ throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
+ errorMessage,
+ CHECK_LOCATION().AsString()));
}
}
@@ -214,13 +212,12 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
if (bindingInfo.second.GetNumElements() != it.second.size())
{
- throw armnn::Exception(
- boost::str(boost::format("Output tensor %1% is expected to have %2% elements. "
- "%3% elements supplied. %4%") %
- it.first %
- bindingInfo.second.GetNumElements() %
- it.second.size() %
- CHECK_LOCATION().AsString()));
+ throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} elements. "
+ "{2} elements supplied. {3}",
+ it.first,
+ bindingInfo.second.GetNumElements(),
+ it.second.size(),
+ CHECK_LOCATION().AsString()));
}
// If the expected output shape is set, the output tensor checks will be carried out.
@@ -234,25 +231,25 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
{
if (m_SingleOutputShape[i] != bindingInfo.second.GetShape()[i])
{
- throw armnn::Exception(
- boost::str(boost::format("Output tensor %1% is expected to have %2% shape. "
- "%3% shape supplied. %4%") %
- it.first %
- bindingInfo.second.GetShape() %
- m_SingleOutputShape %
- CHECK_LOCATION().AsString()));
+ // This exception message could not be created by fmt::format because of an oddity in
+ // the operator << of TensorShape.
+ std::stringstream message;
+ message << "Output tensor " << it.first << " is expected to have "
<< bindingInfo.second.GetShape() << " shape. "
+ << m_SingleOutputShape << " shape supplied. "
+ << CHECK_LOCATION().AsString();
+ throw armnn::Exception(message.str());
}
}
}
else
{
- throw armnn::Exception(
- boost::str(boost::format("Output tensor %1% is expected to have %2% dimensions. "
- "%3% dimensions supplied. %4%") %
- it.first %
- bindingInfo.second.GetShape().GetNumDimensions() %
- NumOutputDimensions %
- CHECK_LOCATION().AsString()));
+ throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} dimensions. "
+ "{2} dimensions supplied. {3}",
+ it.first,
+ bindingInfo.second.GetShape().GetNumDimensions(),
+ NumOutputDimensions,
+ CHECK_LOCATION().AsString()));
}
}
diff --git a/src/armnnUtils/TensorIOUtils.hpp b/src/armnnUtils/TensorIOUtils.hpp
index 098b4dadec..b06bb7132b 100644
--- a/src/armnnUtils/TensorIOUtils.hpp
+++ b/src/armnnUtils/TensorIOUtils.hpp
@@ -7,7 +7,7 @@
#include <armnn/Tensor.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <mapbox/variant.hpp>
namespace armnnUtils
@@ -22,10 +22,10 @@ inline armnn::InputTensors MakeInputTensors(const std::vector<armnn::BindingPoin
const size_t numInputs = inputBindings.size();
if (numInputs != inputDataContainers.size())
{
- throw armnn::Exception(boost::str(boost::format("The number of inputs does not match number of "
- "tensor data containers: %1% != %2%")
- % numInputs
- % inputDataContainers.size()));
+ throw armnn::Exception(fmt::format("The number of inputs does not match number of "
+ "tensor data containers: {0} != {1}",
+ numInputs,
+ inputDataContainers.size()));
}
for (size_t i = 0; i < numInputs; i++)
@@ -37,10 +37,9 @@ inline armnn::InputTensors MakeInputTensors(const std::vector<armnn::BindingPoin
{
if (value.size() != inputBinding.second.GetNumElements())
{
- throw armnn::Exception(boost::str(boost::format("The input tensor has incorrect size "
- "(expected %1% got %2%)")
- % inputBinding.second.GetNumElements()
- % value.size()));
+ throw armnn::Exception(fmt::format("The input tensor has incorrect size (expected {0} got {1})",
+ inputBinding.second.GetNumElements(),
+ value.size()));
}
armnn::ConstTensor inputTensor(inputBinding.second, value.data());
@@ -61,10 +60,10 @@ inline armnn::OutputTensors MakeOutputTensors(const std::vector<armnn::BindingPo
const size_t numOutputs = outputBindings.size();
if (numOutputs != outputDataContainers.size())
{
- throw armnn::Exception(boost::str(boost::format("Number of outputs does not match number of "
- "tensor data containers: %1% != %2%")
- % numOutputs
- % outputDataContainers.size()));
+ throw armnn::Exception(fmt::format("Number of outputs does not match number "
+ "of tensor data containers: {0} != {1}",
+ numOutputs,
+ outputDataContainers.size()));
}
for (size_t i = 0; i < numOutputs; i++)
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index adaf8114f1..2890399cd8 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -9,7 +9,7 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
using namespace armnn;
@@ -88,11 +88,10 @@ TensorShape ExpandDims(const TensorShape& tensorShape, int axis)
if (axis < -armnn::numeric_cast<int>(outputDim) || axis > armnn::numeric_cast<int>(tensorShape.GetNumDimensions()))
{
- throw InvalidArgumentException(
- boost::str(boost::format("Invalid expansion axis %1% for %2%D input tensor. %3%") %
- axis %
- tensorShape.GetNumDimensions() %
- CHECK_LOCATION().AsString()));
+ throw InvalidArgumentException(fmt::format("Invalid expansion axis {} for {}D input tensor. {}",
+ axis,
+ tensorShape.GetNumDimensions(),
+ CHECK_LOCATION().AsString()));
}
if (axis < 0)
@@ -101,6 +100,7 @@ TensorShape ExpandDims(const TensorShape& tensorShape, int axis)
}
std::vector<unsigned int> outputShape;
+ outputShape.reserve(tensorShape.GetNumDimensions());
for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
{
outputShape.push_back(tensorShape[i]);
diff --git a/src/armnnUtils/VerificationHelpers.cpp b/src/armnnUtils/VerificationHelpers.cpp
index 243d22e444..a4db97adf4 100644
--- a/src/armnnUtils/VerificationHelpers.cpp
+++ b/src/armnnUtils/VerificationHelpers.cpp
@@ -4,9 +4,10 @@
//
#include "VerificationHelpers.hpp"
-#include <boost/format.hpp>
#include <armnn/Exceptions.hpp>
+#include <fmt/format.h>
+
using namespace armnn;
namespace armnnUtils
@@ -23,13 +24,11 @@ void CheckValidSize(std::initializer_list<size_t> validInputCounts,
[&actualValue](size_t x) { return x == actualValue; } );
if (!isValid)
{
- throw ParseException(
- boost::str(
- boost::format("%1% = %2% is not valid, not in {%3%}. %4%") %
- actualExpr %
- actualValue %
- validExpr %
- location.AsString()));
+ throw ParseException(fmt::format("{} = {} is not valid, not in {{{}}}. {}",
+ actualExpr,
+ actualValue,
+ validExpr,
+ location.AsString()));
}
}
@@ -39,12 +38,10 @@ uint32_t NonNegative(const char* expr,
{
if (value < 0)
{
- throw ParseException(
- boost::str(
- boost::format("'%1%' must be non-negative, received: %2% at %3%") %
- expr %
- value %
- location.AsString() ));
+ throw ParseException(fmt::format("'{}' must be non-negative, received: {} at {}",
+ expr,
+ value,
+ location.AsString()));
}
else
{
@@ -58,12 +55,11 @@ int32_t VerifyInt32(const char* expr,
{
if (value < std::numeric_limits<int>::min() || value > std::numeric_limits<int>::max())
{
- throw ParseException(
- boost::str(
- boost::format("'%1%' must should fit into a int32 (ArmNN don't support int64), received: %2% at %3%") %
- expr %
- value %
- location.AsString() ));
+ throw ParseException(fmt::format("'{}' must should fit into a int32 (ArmNN don't support int64),"
+ " received: {} at {}",
+ expr,
+ value,
+ location.AsString()));
}
else
{