author     Colm Donelan <Colm.Donelan@arm.com>    2020-09-09 12:48:16 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>    2020-10-02 15:30:11 +0000
commit     5b5c222f6b0c40a8e0f9ef9dedccd6f0f18c4c2c (patch)
tree       6ad0552e5efeb7ae3474a7f5019bc2fb33fced03 /src/armnn
parent     24ac85943b609e48fc36d16570ca4b5b90d31a6a (diff)
download   armnn-5b5c222f6b0c40a8e0f9ef9dedccd6f0f18c4c2c.tar.gz
IVGCVSW-5297 Remove boost::format from rest of ArmNN.
* Replacing calls to boost::format with fmt::format.
* TensorUtils.cpp: added an outputShape.reserve call.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I4b2ed0f72039df824a2adca9309b8a9bbb158c5b
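
For reference, the pattern applied throughout these hunks replaces boost::format's positional placeholders (%1%, %2%, ...) fed via operator% with fmt::format's brace placeholders and ordinary function arguments. Below is a minimal sketch of the substitution, assuming only <fmt/format.h>; the DescribeSlot helper and its message are hypothetical illustrations, not code from ArmNN.

#include <fmt/format.h>
#include <string>

// Hypothetical helper showing the substitution pattern; not part of the ArmNN sources.
std::string DescribeSlot(const std::string& layerName, unsigned int slotIndex)
{
    // Equivalent call before this change:
    //   boost::str(boost::format("%1% (%2%)") % layerName % slotIndex)
    return fmt::format("{} ({})", layerName, slotIndex);
}

Both automatic ({}) and explicitly indexed ({0}, {1}) placeholders appear in the hunks below; fmt accepts either style, provided a single format string does not mix the two.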
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/Graph.cpp          13
-rw-r--r--  src/armnn/Layer.cpp          37
-rw-r--r--  src/armnn/LoadedNetwork.cpp  26
3 files changed, 33 insertions, 43 deletions
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index a497a45da9..30546e1f0a 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -16,7 +16,7 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <unordered_map>
#include <DotSerializer.hpp>
@@ -312,12 +312,11 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
// A copy layer is needed in between the source and destination layers.
// Record the operation rather than attempting to modify the graph as we go.
// (invalidating iterators)
- const std::string compLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]")
- % srcLayer->GetName()
- % srcOutputIndex
- % dstLayer.GetName()
- % dstInputSlot->GetSlotIndex());
-
+ const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
+ srcLayer->GetName(),
+ srcOutputIndex,
+ dstLayer.GetName(),
+ dstInputSlot->GetSlotIndex());
Layer* compLayer = nullptr;
if (strategy == EdgeStrategy::CopyToTarget)
{
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index c8d5a1690d..d06b0459f6 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -10,7 +10,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
#include <numeric>
@@ -159,8 +159,7 @@ void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
{
- throw InvalidArgumentException(
- boost::str(boost::format("GetConnection: Invalid index %1% provided") % index));
+ throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
}
}
@@ -350,14 +349,12 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
if (GetInputSlot(i).GetConnection() == nullptr)
{
throw LayerValidationException(
- boost::str(
- boost::format(
- "Input connection #%1% must be connected "
- "for %2% layer %3% %4%")
- % i
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % location.AsString()));
+ fmt::format("Input connection #{0} must be connected "
+ "for {1} layer {2} {3}",
+ i,
+ GetLayerTypeAsCString(this->GetType()),
+ GetNameStr(),
+ location.AsString()));
}
}
}
@@ -375,16 +372,14 @@ std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>
if (GetNumInputSlots() != GetNumOutputSlots())
{
throw UnimplementedException(
- boost::str(
- boost::format(
- "Default implementation for InferOutputShapes can only be used for "
- "layers with the same number of input and output slots. This doesn't "
- "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % GetNumInputSlots()
- % GetNumOutputSlots()
- % CHECK_LOCATION().AsString()));
+ fmt::format("Default implementation for InferOutputShapes can only be used for "
+ "layers with the same number of input and output slots. This doesn't "
+ "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
+ GetLayerTypeAsCString(this->GetType()),
+ GetNameStr(),
+ GetNumInputSlots(),
+ GetNumOutputSlots(),
+ CHECK_LOCATION().AsString()));
}
return inputShapes;
}
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 33625744c5..00ac90b121 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -8,7 +8,6 @@
#include "Graph.hpp"
#include "Network.hpp"
#include <Processes.hpp>
-#include "Runtime.hpp"
#include "Profiling.hpp"
#include "HeapProfiling.hpp"
@@ -23,7 +22,7 @@
#include <LabelsAndEventClasses.hpp>
-#include <boost/format.hpp>
+#include <fmt/format.h>
namespace armnn
{
@@ -236,9 +235,9 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
{
const char* const layerName =
layer->GetNameStr().length() != 0 ? layer->GetName() : "<Unnamed>";
- throw InvalidArgumentException(boost::str(
- boost::format("No workload created for layer (name: '%1%' type: '%2%') (compute '%3%')")
- % layerName % static_cast<int>(layer->GetType()) % layer->GetBackendId().Get()
+ throw InvalidArgumentException(
+ fmt::format("No workload created for layer (name: '{0}' type: '{1}') (compute '{2}')",
+ layerName, static_cast<int>(layer->GetType()), layer->GetBackendId().Get()
));
}
@@ -325,7 +324,7 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
}
}
- throw InvalidArgumentException(boost::str(boost::format("No input layer is associated with id %1%") % layerId));
+ throw InvalidArgumentException(fmt::format("No input layer is associated with id {}", layerId));
}
TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
@@ -340,7 +339,7 @@ TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
}
}
- throw InvalidArgumentException(boost::str(boost::format("No output layer is associated with id %1%") % layerId));
+ throw InvalidArgumentException(fmt::format("No output layer is associated with id {}", layerId));
}
const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) const
@@ -350,12 +349,10 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
auto it = m_WorkloadFactories.find(layer.GetBackendId());
if (it == m_WorkloadFactories.end())
{
- throw RuntimeException(
- boost::str(
- boost::format("No workload factory for %1% to be used for layer: %2%")
- % layer.GetBackendId().Get()
- % layer.GetNameStr()),
- CHECK_LOCATION());
+ throw RuntimeException(fmt::format("No workload factory for {0} to be used for layer: {1}",
+ layer.GetBackendId().Get(),
+ layer.GetNameStr()),
+ CHECK_LOCATION());
}
workloadFactory = it->second.first.get();
@@ -411,8 +408,7 @@ static const TensorPin& GetTensorPin(LayerBindingId id,
}
else
{
- throw InvalidArgumentException(boost::str(
- boost::format("No tensor supplied for %1% %2%") % bindingPointDesc % id));
+ throw InvalidArgumentException(fmt::format("No tensor supplied for {0} {1}", bindingPointDesc, id));
}
}