author     Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-11 16:17:48 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2020-09-14 17:14:30 +0000
commit     589e3e81a86c83456580e112978bf7a0ed5f43ac (patch)
tree       0b273313f7bb8fd34696abd129bd3402d737ef4a /src/armnnTfLiteParser/TfLiteParser.cpp
parent     04a729708f986b1a69c1efc42d5cf18271cfae1e (diff)
download   armnn-589e3e81a86c83456580e112978bf7a0ed5f43ac.tar.gz
IVGCVSW-5302 Remove some boost::numeric_cast from parsers
* Replaced with armnn/utility/NumericCast.hpp
* Exclusions in armnnCaffeParser
* Three excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib468b606238694334a8319d0ed5db381ce37a915
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp  23
1 file changed, 11 insertions(+), 12 deletions(-)
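For reference, the replacement keeps the Boost call pattern: the destination type is given explicitly and the conversion is range-checked. The following is a minimal usage sketch, assuming only the armnn/utility/NumericCast.hpp header named in the commit message and the integral call patterns visible in the hunks below (size_t to unsigned int for TensorShape, int64_t to int32_t for the quantization zero point); the exact failure behaviour on an out-of-range value is not shown in this diff.

    #include <armnn/utility/NumericCast.hpp>

    #include <cstdint>
    #include <vector>

    int main()
    {
        std::vector<int32_t> safeShape = {1, 2, 3, 4};

        // Narrowing size_t -> unsigned int, mirroring the TensorShape call sites in the diff.
        const unsigned int numDims = armnn::numeric_cast<unsigned int>(safeShape.size());

        // Narrowing int64_t -> int32_t, mirroring the zero_point conversion.
        const int64_t zeroPoint64 = 128;
        const int32_t zeroPoint = armnn::numeric_cast<int32_t>(zeroPoint64);

        return (numDims == 4 && zeroPoint == 128) ? 0 : 1;
    }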
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8bc475347c..109c2c2be1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -28,7 +28,6 @@
#include <flatbuffers/flexbuffers.h>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fstream>
#include <algorithm>
@@ -388,10 +387,10 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
{
// NOTE: we lose precision here when converting from 64 bit to 32
// but this is what we support at the moment in ArmNN
- quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
+ quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
}
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -414,7 +413,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
std::back_inserter(quantizationScales));
// QSymmS8 Per-axis
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -423,14 +422,14 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
armnn::TensorInfo result(tensorShape,
type,
quantizationScales,
- dimensionMappings[boost::numeric_cast<unsigned int>(
+ dimensionMappings[armnn::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
return result;
}
}
else
{
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -866,8 +865,8 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
- const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+ const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
+ const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
StandInDescriptor descriptor(numInputs, numOutputs);
auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
@@ -2144,7 +2143,7 @@ armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & i
}
auto targetNumElements =
- boost::numeric_cast<unsigned int>(
+ armnn::numeric_cast<unsigned int>(
std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
@@ -2899,14 +2898,14 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
// Check for inferred Axis
if (numInferred == 0)
{
- if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
+ if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
{
throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
}
}
else if (numInferred == 1)
{
- splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
+ splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
}
else
{
@@ -2922,7 +2921,7 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
unsigned int accumSplit = 0;
for (unsigned int j = 0; j < numSplits; ++j)
{
- unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
+ unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
// Set the size of the views.
for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
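The substitutions above are mechanical one-for-one replacements, so the only semantic question is what a checked narrowing cast guarantees. The sketch below is a hypothetical illustration of such a cast for integral types: checked_narrow_cast is an invented name and this is not the armnn::numeric_cast implementation. It throws on failure, whereas the real utility's failure behaviour is not shown in this diff; the integral-only restriction echoes the commit note that NumericCast.hpp lacked a float implementation at the time.

    #include <stdexcept>
    #include <type_traits>

    // Hypothetical sketch of a range-checked narrowing cast in the spirit of
    // boost::numeric_cast / armnn::numeric_cast. Not the Arm NN implementation.
    template <typename Dest, typename Source>
    Dest checked_narrow_cast(Source source)
    {
        static_assert(std::is_integral<Dest>::value && std::is_integral<Source>::value,
                      "this sketch handles integral types only");

        const Dest converted = static_cast<Dest>(source);

        // The value fits only if converting back reproduces it and the sign is preserved;
        // the sign test catches wrap-around between same-width signed/unsigned types.
        const bool roundTrips     = static_cast<Source>(converted) == source;
        const bool sourceNegative = std::is_signed<Source>::value && source < static_cast<Source>(0);
        const bool destNegative   = std::is_signed<Dest>::value && converted < static_cast<Dest>(0);

        if (!roundTrips || sourceNegative != destNegative)
        {
            throw std::out_of_range("checked_narrow_cast: value does not fit the destination type");
        }
        return converted;
    }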