author    Kevin May <kevin.may@arm.com>              2021-02-03 17:38:41 +0000
committer Francis Murtagh <francis.murtagh@arm.com>  2021-02-04 11:23:41 +0000
commit    7d96b16acfdbdcef6739d59ba067a37c062aa03f (patch)
tree      bb0a93f9030e26568d6d7c44776993e433996c31 /src/armnnTfLiteParser/test
parent    d0bb8eafef2a93356e435ccc4029d487a2cde9e4 (diff)
download  armnn-7d96b16acfdbdcef6739d59ba067a37c062aa03f.tar.gz
IVGCVSW-5592 Implement Pimpl Idiom for Tf and TfLite Parsers
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I4a82aca4a2c47b3c598b91bc0075c09397be728a
Diffstat (limited to 'src/armnnTfLiteParser/test')
-rw-r--r-- src/armnnTfLiteParser/test/Constant.cpp                   2
-rw-r--r-- src/armnnTfLiteParser/test/GetBuffer.cpp                 25
-rw-r--r-- src/armnnTfLiteParser/test/GetInputsOutputs.cpp          58
-rw-r--r-- src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp  50
-rw-r--r-- src/armnnTfLiteParser/test/GetTensorIds.cpp              48
-rw-r--r-- src/armnnTfLiteParser/test/LoadModel.cpp                 21
-rw-r--r-- src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp       6
-rw-r--r-- src/armnnTfLiteParser/test/ResizeBilinear.cpp             2
-rw-r--r-- src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp      2
9 files changed, 119 insertions, 95 deletions
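Note: the commit message refers to the Pimpl (pointer-to-implementation) idiom. As a minimal illustrative sketch of that pattern only — the class and member names Parser, ParserImpl and DoParse below are hypothetical and not the ArmNN API — a public facade holds a pointer to a separately defined implementation class and forwards calls to it:

    // Purely illustrative Pimpl sketch; names are hypothetical, not ArmNN's.
    #include <memory>
    #include <string>

    class ParserImpl                    // implementation class, usable directly in unit tests
    {
    public:
        int DoParse(const std::string& input) const
        {
            return static_cast<int>(input.size());   // stand-in for real parsing work
        }
    };

    class Parser                        // public facade with a stable header
    {
    public:
        Parser() : m_Impl(std::make_unique<ParserImpl>()) {}

        int Parse(const std::string& input) const
        {
            return m_Impl->DoParse(input);           // forward to the implementation
        }

    private:
        std::unique_ptr<ParserImpl> m_Impl;          // pointer to implementation; in a real
                                                     // Pimpl split, ParserImpl is defined in
                                                     // the .cpp and only forward-declared here
    };

Consistent with that split, the test fixtures in the diff below stop using the public TfLiteParser type and instead reference TfLiteParserImpl directly, so they can keep calling static helpers such as LoadModelFromBinary, GetBuffer and GetInputs on the implementation class.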
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index cc89223469..bfb76a9791 100644
--- a/src/armnnTfLiteParser/test/Constant.cpp
+++ b/src/armnnTfLiteParser/test/Constant.cpp
@@ -10,7 +10,7 @@
#include <string>
#include <iostream>
-using armnnTfLiteParser::TfLiteParser;
+using armnnTfLiteParser::TfLiteParserImpl;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
index cccdbce7aa..0e72522c79 100644
--- a/src/armnnTfLiteParser/test/GetBuffer.cpp
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -8,7 +8,7 @@
#include "../TfLiteParser.hpp"
#include <sstream>
-using armnnTfLiteParser::TfLiteParser;
+using armnnTfLiteParser::TfLiteParserImpl;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
@@ -88,12 +88,12 @@ struct GetBufferFixture : public ParserFlatbuffersFixture
ReadStringToBinary();
}
- void CheckBufferContents(const TfLiteParser::ModelPtr& model,
+ void CheckBufferContents(const TfLiteParserImpl::ModelPtr& model,
std::vector<int32_t> bufferValues, size_t bufferIndex)
{
for(long unsigned int i=0; i<bufferValues.size(); i++)
{
- BOOST_CHECK_EQUAL(TfLiteParser::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+ BOOST_CHECK_EQUAL(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
}
}
};
@@ -101,7 +101,8 @@ struct GetBufferFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
{
//Check contents of buffer are correct
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
std::vector<int32_t> bufferValues = {2,1,0,6,2,1,4,1,2};
CheckBufferContents(model, bufferValues, 2);
}
@@ -109,18 +110,20 @@ BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
{
//Check if test fixture buffers are empty or not
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK(TfLiteParser::GetBuffer(model, 0)->data.empty());
- BOOST_CHECK(TfLiteParser::GetBuffer(model, 1)->data.empty());
- BOOST_CHECK(!TfLiteParser::GetBuffer(model, 2)->data.empty());
- BOOST_CHECK(TfLiteParser::GetBuffer(model, 3)->data.empty());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
+ BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
+ BOOST_CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
+ BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
}
BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
{
//Check if armnn::ParseException thrown when invalid buffer index used
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetBuffer(model, 4), armnn::Exception);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
index 824797890d..894de0c3a0 100644
--- a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -6,8 +6,8 @@
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
@@ -152,22 +152,25 @@ struct GetInputsOutputsFixture : GetInputsOutputsMainFixture
BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
BOOST_CHECK_EQUAL(0, tensors.size());
}
BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
BOOST_CHECK_EQUAL(0, tensors.size());
}
BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
BOOST_CHECK_EQUAL(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
@@ -175,8 +178,9 @@ BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
BOOST_CHECK_EQUAL(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
@@ -184,8 +188,9 @@ BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 1, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 1, 0);
BOOST_CHECK_EQUAL(2, tensors.size());
CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"ConvInputTensor", { }, { }, { 1.0f }, { 0 });
@@ -195,8 +200,9 @@ BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 1, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 1, 0);
BOOST_CHECK_EQUAL(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
@@ -204,36 +210,40 @@ BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
BOOST_AUTO_TEST_CASE(GetInputsNullModel)
{
- BOOST_CHECK_THROW(TfLiteParser::GetInputs(nullptr, 0, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
}
BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
{
- BOOST_CHECK_THROW(TfLiteParser::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 2, 0), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 2, 0), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 0, 1), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 0, 1), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
}
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
index e0fbd353ad..100e8e96d5 100644
--- a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -6,9 +6,9 @@
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
-using TensorRawPtr = TfLiteParser::TensorRawPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
+using TensorRawPtr = TfLiteParserImpl::TensorRawPtr;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
@@ -153,22 +153,25 @@ struct GetSubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
BOOST_CHECK_EQUAL(0, subgraphTensors.size());
}
BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
BOOST_CHECK_EQUAL(0, subgraphTensors.size());
}
BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
BOOST_CHECK_EQUAL(1, subgraphTensors.size());
BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
@@ -177,8 +180,9 @@ BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
BOOST_CHECK_EQUAL(1, subgraphTensors.size());
BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
@@ -187,8 +191,9 @@ BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutp
BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 1);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 1);
BOOST_CHECK_EQUAL(1, subgraphTensors.size());
BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
@@ -197,8 +202,9 @@ BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFi
BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 1);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 1);
BOOST_CHECK_EQUAL(1, subgraphTensors.size());
BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
@@ -207,24 +213,26 @@ BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
{
- BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
}
BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
{
- BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(model, 2), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(model, 2), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
}
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
index 6b82bb1f97..f45f6e66b9 100644
--- a/src/armnnTfLiteParser/test/GetTensorIds.cpp
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -6,8 +6,8 @@
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
@@ -91,72 +91,80 @@ struct GetInputOutputTensorIdsFixture : GetTensorIdsFixture
BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
std::vector<int32_t> expectedIds = { };
- std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
inputTensorIds.begin(), inputTensorIds.end());
}
BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
std::vector<int32_t> expectedIds = { };
- std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
outputTensorIds.begin(), outputTensorIds.end());
}
BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
- std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
inputTensorIds.begin(), inputTensorIds.end());
}
BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
std::vector<int32_t> expectedOutputIds = { 3 };
- std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
outputTensorIds.begin(), outputTensorIds.end());
}
BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
{
- BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
{
- BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
}
BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
index 9777333495..1afb5f12e5 100644
--- a/src/armnnTfLiteParser/test/LoadModel.cpp
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -8,10 +8,10 @@
#include <Filesystem.hpp>
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
-using SubgraphPtr = TfLiteParser::SubgraphPtr;
-using OperatorPtr = TfLiteParser::OperatorPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
+using SubgraphPtr = TfLiteParserImpl::SubgraphPtr;
+using OperatorPtr = TfLiteParserImpl::OperatorPtr;
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
@@ -185,7 +185,8 @@ struct LoadModelFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
{
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+ m_GraphBinary.size());
CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
2, "Test loading a model", 2);
CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
@@ -205,7 +206,7 @@ BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
m_GraphBinary.size(), true);
BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
- TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile(fname.c_str());
+ TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(fname.c_str());
CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
2, "Test loading a model", 2);
CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
@@ -219,24 +220,24 @@ BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
BOOST_AUTO_TEST_CASE(LoadNullBinary)
{
- BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
{
std::string testData = "invalid data";
- BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
+ BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
testData.length()), armnn::ParseException);
}
BOOST_AUTO_TEST_CASE(LoadFileNotFound)
{
- BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
}
BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
{
- BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+ BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
index 26cd92a813..e616158f29 100644
--- a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -11,12 +11,10 @@
struct TfLiteParserFixture
{
- armnnTfLiteParser::TfLiteParser m_Parser;
+ armnnTfLiteParser::TfLiteParserImpl m_Parser;
unsigned int m_InputShape[4];
- TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {
- m_Parser.Create();
- }
+ TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {}
~TfLiteParserFixture() { }
};
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
index 400dc78b67..8af5612b9e 100644
--- a/src/armnnTfLiteParser/test/ResizeBilinear.cpp
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -10,8 +10,6 @@
#include <string>
#include <iostream>
-using armnnTfLiteParser::TfLiteParser;
-
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
struct ResizeBilinearFixture : public ParserFlatbuffersFixture
diff --git a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
index fada810fbd..7add5f2a3e 100644
--- a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
+++ b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
@@ -10,8 +10,6 @@
#include <string>
#include <iostream>
-using armnnTfLiteParser::TfLiteParser;
-
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
struct ResizeNearestNeighborFixture : public ParserFlatbuffersFixture