aboutsummaryrefslogtreecommitdiff
path: root/src/armnnTfLiteParser
diff options
context:
space:
mode:
authorMatthew Sloyan <matthew.sloyan@arm.com>2020-09-11 16:17:48 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-09-14 17:14:30 +0000
commit589e3e81a86c83456580e112978bf7a0ed5f43ac (patch)
tree0b273313f7bb8fd34696abd129bd3402d737ef4a /src/armnnTfLiteParser
parent04a729708f986b1a69c1efc42d5cf18271cfae1e (diff)
downloadarmnn-589e3e81a86c83456580e112978bf7a0ed5f43ac.tar.gz
IVGCVSW-5302 Remove some boost::numeric_cast from parsers
* Replaced with armnn/utility/NumericCast.hpp
* Exclusions in armnnCaffeParser
* Three excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib468b606238694334a8319d0ed5db381ce37a915
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp23
-rw-r--r--src/armnnTfLiteParser/test/Unsupported.cpp9
2 files changed, 16 insertions, 16 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8bc475347c..109c2c2be1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -28,7 +28,6 @@
#include <flatbuffers/flexbuffers.h>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fstream>
#include <algorithm>
@@ -388,10 +387,10 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
{
// NOTE: we lose precision here when converting from 64 bit to 32
// but this is what we support at the moment in ArmNN
- quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
+ quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
}
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -414,7 +413,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
std::back_inserter(quantizationScales));
// QSymmS8 Per-axis
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -423,14 +422,14 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
armnn::TensorInfo result(tensorShape,
type,
quantizationScales,
- dimensionMappings[boost::numeric_cast<unsigned int>(
+ dimensionMappings[armnn::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
return result;
}
}
else
{
- TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+ TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
safeShape.data());
if (isDynamic)
{
@@ -866,8 +865,8 @@ void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operato
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
- const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
- const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+ const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
+ const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
StandInDescriptor descriptor(numInputs, numOutputs);
auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
@@ -2144,7 +2143,7 @@ armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & i
}
auto targetNumElements =
- boost::numeric_cast<unsigned int>(
+ armnn::numeric_cast<unsigned int>(
std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
@@ -2899,14 +2898,14 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
// Check for inferred Axis
if (numInferred == 0)
{
- if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
+ if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
{
throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
}
}
else if (numInferred == 1)
{
- splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
+ splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
}
else
{
@@ -2922,7 +2921,7 @@ void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
unsigned int accumSplit = 0;
for (unsigned int j = 0; j < numSplits; ++j)
{
- unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
+ unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
// Set the size of the views.
for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index dd77bcacb1..b0ac2d60ad 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -8,6 +8,7 @@
#include <armnn/LayerVisitorBase.hpp>
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <layers/StandInLayer.hpp>
@@ -39,11 +40,11 @@ public:
const StandInDescriptor& descriptor,
const char*) override
{
- unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+ unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
BOOST_CHECK(descriptor.m_NumInputs == numInputs);
BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
- unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+ unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
@@ -77,10 +78,10 @@ public:
: ParserFlatbuffersFixture()
, m_StandInLayerVerifier(inputInfos, outputInfos)
{
- const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+ const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
ARMNN_ASSERT(numInputs > 0);
- const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+ const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
ARMNN_ASSERT(numOutputs > 0);
m_JsonString = R"(