about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Matthew Sloyan <matthew.sloyan@arm.com>            2020-09-11 16:17:48 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com> 2020-09-14 17:14:30 +0000
commit    589e3e81a86c83456580e112978bf7a0ed5f43ac (patch)
tree      0b273313f7bb8fd34696abd129bd3402d737ef4a
parent    04a729708f986b1a69c1efc42d5cf18271cfae1e (diff)
download  armnn-589e3e81a86c83456580e112978bf7a0ed5f43ac.tar.gz
IVGCVSW-5302 Remove some boost::numeric_cast from parsers
* Replaced with armnn/utility/NumericCast.hpp
* Exclusions in armnnCaffeParser
* Three excluded as they require a float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib468b606238694334a8319d0ed5db381ce37a915
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp               51
-rw-r--r--  src/armnnCaffeParser/RecordByRecordCaffeParser.cpp  18
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp                   4
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp              23
-rw-r--r--  src/armnnTfLiteParser/test/Unsupported.cpp           9
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp                      14
6 files changed, 59 insertions, 60 deletions
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index b8ce470505..d50846abab 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -14,6 +14,7 @@
#include "VerificationHelpers.hpp"
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/format.hpp>
@@ -66,7 +67,7 @@ namespace
const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int blobIndex)
{
auto nBlobs = layerParam.blobs_size();
- if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
+ if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
{
throw ParseException(
boost::str(
@@ -78,7 +79,7 @@ const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int
CHECK_LOCATION().AsString()));
}
- const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
+ const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
const float* arrayPtr = blob.data().data();
return arrayPtr;
@@ -87,7 +88,7 @@ const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int
void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
{
auto nBlobs = layerParam.blobs_size();
- if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
+ if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
{
throw ParseException(
boost::str(
@@ -98,9 +99,9 @@ void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, u
CHECK_LOCATION().AsString()));
}
- const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
+ const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
- size_t blobSize = boost::numeric_cast<size_t>(blob.data_size());
+ size_t blobSize = armnn::numeric_cast<size_t>(blob.data_size());
if (blobSize != outData.size())
{
throw ParseException(
@@ -115,7 +116,7 @@ void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, u
CHECK_LOCATION().AsString()));
}
- int outSizeInt = boost::numeric_cast<int>(outData.size());
+ int outSizeInt = armnn::numeric_cast<int>(outData.size());
for (int i = 0; i < outSizeInt; ++i)
{
outData[static_cast<size_t>(i)] = blob.data(i);
@@ -133,7 +134,7 @@ void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
unsigned int numOutputs)
{
int numInputsActual = layerParameter.bottom_size();
- if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual))
+ if (numInputs != armnn::numeric_cast<unsigned int>(numInputsActual))
{
throw ParseException(
boost::str(
@@ -146,7 +147,7 @@ void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
}
int numOutputsActual = layerParameter.top_size();
- if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual))
+ if (numOutputs != armnn::numeric_cast<unsigned int>(numOutputsActual))
{
throw ParseException(
boost::str(
@@ -320,7 +321,7 @@ TensorInfo CaffeParserBase::BlobShapeToTensorInfo(const caffe::BlobShape& blobSh
shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
}
- return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
+ return TensorInfo(armnn::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
}
BlobShape TensorDescToBlobShape(const TensorInfo& desc)
@@ -329,7 +330,7 @@ BlobShape TensorDescToBlobShape(const TensorInfo& desc)
for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
{
ret.add_dim(i);
- ret.set_dim(boost::numeric_cast<int>(i), desc.GetShape()[i]);
+ ret.set_dim(armnn::numeric_cast<int>(i), desc.GetShape()[i]);
}
return ret;
@@ -340,7 +341,7 @@ BlobShape TensorDescToBlobShape(const TensorInfo& desc)
vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& layerParam)
{
std::vector<const caffe::LayerParameter*> ret;
- ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size()));
+ ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
for (int j = 0; j < layerParam.bottom_size(); ++j)
{
std::string inputName = layerParam.bottom(j);
@@ -369,7 +370,7 @@ void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
const InputParameter& param = layerParam.input_param();
- const armnn::LayerBindingId inputId = boost::numeric_cast<armnn::LayerBindingId>(
+ const armnn::LayerBindingId inputId = armnn::numeric_cast<armnn::LayerBindingId>(
m_NetworkInputsBindingInfo.size());
armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());
@@ -504,7 +505,7 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
static_cast<float>(desc.m_StrideX)) + 1));
// Load the weight data for ALL groups
- vector<float> weightData(boost::numeric_cast<size_t>(numGroups *
+ vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
inputShape.dim(1) * // number of input channels
outputShape.dim(1) * // number of output channels
kernelH *
@@ -522,15 +523,15 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
if (desc.m_BiasEnabled)
{
- biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
+ biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
GetDataFromBlob(layerParam, biasData, 1);
const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
}
- const unsigned int numWeightsPerGroup = boost::numeric_cast<unsigned int>(weightData.size()) / numGroups;
- const unsigned int numBiasesPerGroup = boost::numeric_cast<unsigned int>(biasData.size()) / numGroups;
+ const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
+ const unsigned int numBiasesPerGroup = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
for (unsigned int g = 0; g < numGroups; ++g)
{
@@ -648,7 +649,7 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
static_cast<float>(desc.m_StrideX)) + 1));
// Load the weight data
- size_t allWeightsSize = boost::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
+ size_t allWeightsSize = armnn::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
vector<float> weightData(allWeightsSize);
GetDataFromBlob(layerParam, weightData, 0);
@@ -668,7 +669,7 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
{
TensorInfo biasInfo;
- biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
+ biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
GetDataFromBlob(layerParam, biasData, 1);
const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
@@ -824,7 +825,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
static_cast<float>(strideW)) + 1));
// Load the weight data for ALL groups
- vector<float> weightData(boost::numeric_cast<size_t>(inputShape.dim(1) *
+ vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
outputShape.dim(1) *
kernelH *
kernelW));
@@ -846,7 +847,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
{
TensorInfo biasInfo;
- biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
+ biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
GetDataFromBlob(layerParam, biasData, 1);
const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
@@ -1290,7 +1291,7 @@ void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
{
const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
- layerParam.bottom(boost::numeric_cast<int>(viewIndex))).GetTensorInfo();
+ layerParam.bottom(armnn::numeric_cast<int>(viewIndex))).GetTensorInfo();
// Checks whether the dimensions of the input tensors are actually 4.
if (inputInfo.GetNumDimensions()!=4)
{
@@ -1328,7 +1329,7 @@ void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
for (unsigned int i = 0; i < numInputs; ++i)
{
- armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
+ armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(armnn::numeric_cast<int>(i)));
outputSlot.Connect(concatlayer->GetInputSlot(i));
}
@@ -1375,8 +1376,8 @@ void CaffeParserBase::ParseBatchNormLayer(const LayerParameter& layerParam)
GetDataFromBlob(layerParam, varianceData, 1);
// Reads moving average factor and applies scaling (if required).
- const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
- const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
+ const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(2));
+ const float movingAverageFactor = blob.data(armnn::numeric_cast<int>(0));
if(movingAverageFactor != 0.0f)
{
const float scaleFactor = 1.0f / movingAverageFactor;
@@ -1722,7 +1723,7 @@ void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
{
armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
- const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
+ const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
m_NetworkOutputsBindingInfo.size());
armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
outputSlot.Connect(outputLayer->GetInputSlot(0));
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
index cb7943655d..a59725cbd2 100644
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
+++ b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
@@ -7,12 +7,10 @@
#include "armnn/Exceptions.hpp"
#include "armnn/Utils.hpp"
-
+#include <armnn/utility/NumericCast.hpp>
#include "GraphTopologicalSort.hpp"
-#include <boost/numeric/conversion/cast.hpp>
-
// Caffe
#include <google/protobuf/wire_format.h>
@@ -282,7 +280,7 @@ std::unique_ptr<char[]> AllocateBuffer(std::ifstream& ifs, VarLenDataInfo& dataI
std::unique_ptr<char[]> ptr(new char[dataInfo.SizeOfData()]);
ifs.clear();
ifs.seekg(dataInfo.PositionOfData(), std::ios_base::beg);
- ifs.read(ptr.get(), boost::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
+ ifs.read(ptr.get(), armnn::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
return ptr;
}
@@ -299,12 +297,12 @@ VarLenDataInfo CreateVarLenDataInfo(std::streamoff bufferStart, std::streamoff e
// on the platform in which I am currently compiling std::streamoff is signed long int and
// size_t is unsigned long int so there is no way this error condition can fire but this stuff
// is supposed to be portable so the check remains in place
- if (boost::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX) {
+ if (armnn::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX) {
std::stringstream ss;
ss << "layer is greater than " << SIZE_MAX << " in size cannot process. layer size = [" << sizeOfLayer << "]";
throw armnn::ParseException(ss.str());
}
- LayerParameterInfo info(bufferStart, boost::numeric_cast<size_t>(sizeOfLayer));
+ LayerParameterInfo info(bufferStart, armnn::numeric_cast<size_t>(sizeOfLayer));
return info;
}
@@ -314,7 +312,7 @@ void ReadTopologicalInfoForLayerParameter(LayerParameterInfo& layerInfo, std::if
ifs.clear();
ifs.seekg(layerInfo.PositionOfData(), std::ios_base::beg);
std::streamoff endOfLayer = layerInfo.PositionOfData() +
- boost::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
+ armnn::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
while(true)
{
// check to see if we have reached the end of the record
@@ -342,7 +340,7 @@ void ReadTopologicalInfoForLayerParameter(LayerParameterInfo& layerInfo, std::if
{
int size = ReadBase128(ifs);
std::streamoff posStartOfData = ifs.tellg();
- VarLenDataInfo dataInfo(posStartOfData, boost::numeric_cast<size_t>(size));
+ VarLenDataInfo dataInfo(posStartOfData, armnn::numeric_cast<size_t>(size));
//optional string name = 1; // the layer name
//optional string type = 2; // the layer type
//repeated string bottom = 3; // the name of each bottom blob
@@ -684,7 +682,7 @@ armnn::INetworkPtr RecordByRecordCaffeParser::LoadLayers(std::ifstream& ifs,
char *buffer = new char[info->SizeOfData()];
ifs.clear();
ifs.seekg(info->PositionOfData(), std::ios_base::beg);
- ifs.read(buffer, boost::numeric_cast<std::streamsize>(info->SizeOfData()));
+ ifs.read(buffer, armnn::numeric_cast<std::streamsize>(info->SizeOfData()));
bool bRet = layer.ParseFromArray(buffer, static_cast<int>(info->SizeOfData()));
delete[] buffer;
if (!bRet)
@@ -719,7 +717,7 @@ armnn::INetworkPtr RecordByRecordCaffeParser::LoadLayers(std::ifstream& ifs,
{
armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
- const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
+ const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
m_NetworkOutputsBindingInfo.size());
armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
outputSlot.Connect(outputLayer->GetInputSlot(0));
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index a07a899488..01ad12448f 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -6,10 +6,10 @@
#include <armnn/Descriptors.hpp>
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <VerificationHelpers.hpp>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
@@ -350,7 +350,7 @@ TensorInfo ComputeReshapeInfo(const TensorShape& targetShapeTensor,
% CHECK_LOCATION().AsString()));
}
- auto targetNumElements = boost::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
+ auto targetNumElements = armnn::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
-1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
outDims[stretchIndex] = inShape.GetNumElements() / targetNumElements;
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8bc475347c..109c2c2be1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -28,7 +28,6 @@
#include <flatbuffers/flexbuffers.h>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fstream>
#include <algorithm>
@@ -388,10 +387,10 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
{
// NOTE: we lose precision here when converting from 64 bit to 32
// but this is what we support at the moment in ArmNN
- quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
+ quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
}
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -414,7 +413,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
std::back_inserter(quantizationScales));
// QSymmS8 Per-axis
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -423,14 +422,14 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
armnn::TensorInfo result(tensorShape,
type,
quantizationScales,
- dimensionMappings[boost::numeric_cast<unsigned int>(
+ dimensionMappings[armnn::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
return result;
}
}
else
{
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -866,8 +865,8 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
- const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+ const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
+ const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
StandInDescriptor descriptor(numInputs, numOutputs);
auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
@@ -2144,7 +2143,7 @@ armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & i
}
auto targetNumElements =
- boost::numeric_cast<unsigned int>(
+ armnn::numeric_cast<unsigned int>(
std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
@@ -2899,14 +2898,14 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
// Check for inferred Axis
if (numInferred == 0)
{
- if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
+ if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
{
throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
}
}
else if (numInferred == 1)
{
- splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
+ splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
}
else
{
@@ -2922,7 +2921,7 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
unsigned int accumSplit = 0;
for (unsigned int j = 0; j < numSplits; ++j)
{
- unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
+ unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
// Set the size of the views.
for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index dd77bcacb1..b0ac2d60ad 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -8,6 +8,7 @@
#include <armnn/LayerVisitorBase.hpp>
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <layers/StandInLayer.hpp>
@@ -39,11 +40,11 @@ public:
const StandInDescriptor& descriptor,
const char*) override
{
- unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+ unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
BOOST_CHECK(descriptor.m_NumInputs == numInputs);
BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
- unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+ unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
@@ -77,10 +78,10 @@ public:
: ParserFlatbuffersFixture()
, m_StandInLayerVerifier(inputInfos, outputInfos)
{
- const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+ const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
ARMNN_ASSERT(numInputs > 0);
- const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+ const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
ARMNN_ASSERT(numOutputs > 0);
m_JsonString = R"(
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 0d7c371eae..8046a5521c 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -12,6 +12,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <GraphTopologicalSort.hpp>
@@ -23,7 +24,6 @@
#include <tensorflow/core/framework/graph.pb.h>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fmt/core.h>
#include <numeric>
@@ -250,7 +250,7 @@ TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& t
}
auto targetNumElements =
- boost::numeric_cast<unsigned int>(
+ armnn::numeric_cast<unsigned int>(
std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
@@ -563,7 +563,7 @@ TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
return ret;
}
- ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
+ ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
for (int j = 0; j < nodeDef.input_size(); ++j)
{
OutputId outputId = ParseOutputId(nodeDef.input(j));
@@ -1480,7 +1480,7 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
% CHECK_LOCATION().AsString()));
}
- std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+ std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
std::vector<uint32_t> outputDims;
// expandDim operation requires: -1-input.dims() <= dim <= input.dims()
@@ -1503,7 +1503,7 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
// and insert 1 dimension at index 'expandDim'
if (expandDim < 0)
{
- int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
+ int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
outputDims.insert(getPosition, 1);
}
@@ -2766,7 +2766,7 @@ ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeD
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
- const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
+ const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
auto it = m_InputShapes.find(nodeDef.name());
if (it == m_InputShapes.end())
@@ -3524,7 +3524,7 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
m_RequestedOutputs.end())
{
auto outId = ParseOutputId(nodeDef.name());
- const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
+ const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
TensorInfo tensorInfo = prevSlot.GetTensorInfo();