author     Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>    2019-01-22 16:10:44 +0000
committer  Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>    2019-01-22 16:10:44 +0000
commit     649dd9515ddf4bd00a0bff64d51dfd835a6c7b39 (patch)
tree       c938bc8eb11dd24223c0cb00a57d4372a907b943 /src/armnnTfLiteParser
parent     382e21ce95c04479a6900afca81a57949b369f1e (diff)
download   armnn-649dd9515ddf4bd00a0bff64d51dfd835a6c7b39.tar.gz
IVGCVSW-2467 Remove GetDataType<T> function
Change-Id: I7359617a307b9abb4c30b3d5f2364dc6d0f828f0
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/test/Activations.cpp                 8
-rw-r--r--  src/armnnTfLiteParser/test/Addition.cpp                   10
-rw-r--r--  src/armnnTfLiteParser/test/AvgPool2D.cpp                   9
-rw-r--r--  src/armnnTfLiteParser/test/Concatenation.cpp              97
-rw-r--r--  src/armnnTfLiteParser/test/Conv2D.cpp                      8
-rw-r--r--  src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp      6
-rw-r--r--  src/armnnTfLiteParser/test/FullyConnected.cpp              4
-rw-r--r--  src/armnnTfLiteParser/test/MaxPool2D.cpp                   9
-rw-r--r--  src/armnnTfLiteParser/test/Mean.cpp                        5
-rw-r--r--  src/armnnTfLiteParser/test/Multiplication.cpp             25
-rw-r--r--  src/armnnTfLiteParser/test/Pad.cpp                        14
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp   38
-rw-r--r--  src/armnnTfLiteParser/test/Reshape.cpp                    24
-rw-r--r--  src/armnnTfLiteParser/test/Softmax.cpp                     3
-rw-r--r--  src/armnnTfLiteParser/test/Squeeze.cpp                     4
15 files changed, 138 insertions, 126 deletions
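
The diffs below follow one pattern throughout the TfLite parser tests: RunTest previously selected the tensor element type through a plain C++ template type parameter (float, uint8_t), whereas after this change it takes an armnn::DataType enum value and derives the element type from it via armnn::ResolveType. A minimal before/after sketch of the call style, with values shortened from the ParseReLu test shown in the first diff:

    // Old style: element type given directly as a C++ type
    RunTest<2, float>(0, { -1.0f, 1.25f }, { 0.0f, 1.25f });

    // New style: element type derived from the armnn::DataType enum value
    RunTest<2, armnn::DataType::Float32>(0, { -1.0f, 1.25f }, { 0.0f, 1.25f });
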
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
index a30d46408c..534ae4cb73 100644
--- a/src/armnnTfLiteParser/test/Activations.cpp
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -70,8 +70,8 @@ struct ReLuFixture : ActivationFixture
};
BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
}
struct ReLu6Fixture : ActivationFixture
@@ -80,8 +80,8 @@ struct ReLu6Fixture : ActivationFixture
};
BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index 53a0c40337..94389d3134 100644
--- a/src/armnnTfLiteParser/test/Addition.cpp
+++ b/src/armnnTfLiteParser/test/Addition.cpp
@@ -97,11 +97,11 @@ struct SimpleAddFixture : AddFixture
BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{
- RunTest<2, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 4, 6, 8, 10 }}});
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 4, 6, 8, 10 }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index 2fac9079c8..a39c088d44 100644
--- a/src/armnnTfLiteParser/test/AvgPool2D.cpp
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -98,22 +98,23 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index 8629efe3d7..bb5aebf39c 100644
--- a/src/armnnTfLiteParser/test/Concatenation.cpp
+++ b/src/armnnTfLiteParser/test/Concatenation.cpp
@@ -100,10 +100,11 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNCHW : ConcatenationFixture
@@ -113,10 +114,11 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNHWC : ConcatenationFixture
@@ -126,10 +128,11 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
}
struct ConcatenationFixtureDim1 : ConcatenationFixture
@@ -139,15 +142,16 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
}
struct ConcatenationFixtureDim3 : ConcatenationFixture
@@ -157,31 +161,32 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3,
- 4, 5, 6, 7,
- 8, 9, 10, 11,
- 12, 13, 14, 15,
- 16, 17, 18, 19,
- 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53,
- 54, 55, 56, 57,
- 58, 59, 60, 61,
- 62, 63, 64, 65,
- 66, 67, 68, 69,
- 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3,
- 50, 51, 52, 53,
- 4, 5, 6, 7,
- 54, 55, 56, 57,
- 8, 9, 10, 11,
- 58, 59, 60, 61,
- 12, 13, 14, 15,
- 62, 63, 64, 65,
- 16, 17, 18, 19,
- 66, 67, 68, 69,
- 20, 21, 22, 23,
- 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 16, 17, 18, 19,
+ 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53,
+ 54, 55, 56, 57,
+ 58, 59, 60, 61,
+ 62, 63, 64, 65,
+ 66, 67, 68, 69,
+ 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3,
+ 50, 51, 52, 53,
+ 4, 5, 6, 7,
+ 54, 55, 56, 57,
+ 8, 9, 10, 11,
+ 58, 59, 60, 61,
+ 12, 13, 14, 15,
+ 62, 63, 64, 65,
+ 16, 17, 18, 19,
+ 66, 67, 68, 69,
+ 20, 21, 22, 23,
+ 70, 71, 72, 73 } } });
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 79bef733c9..38c6675ddb 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -89,7 +89,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2, 3,
@@ -219,7 +219,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
@@ -290,7 +290,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture
uint8_t outZero = 20;
uint8_t fz = 4; // filter zero point
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
@@ -331,7 +331,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu
{
uint8_t relu6Min = 6 / 2; // divide by output scale
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index e8262f8313..c0767801b3 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -133,7 +133,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -160,7 +160,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -185,7 +185,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 14ca57c2ab..7ee64a476e 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -125,7 +125,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ 400/2 });
@@ -145,7 +145,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ (400+10)/2 });
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 06bf7806cc..759fc37ccd 100644
--- a/src/armnnTfLiteParser/test/MaxPool2D.cpp
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -98,22 +98,23 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Mean.cpp b/src/armnnTfLiteParser/test/Mean.cpp
index d7cb2a20f0..3f0fdf14d0 100644
--- a/src/armnnTfLiteParser/test/Mean.cpp
+++ b/src/armnnTfLiteParser/test/Mean.cpp
@@ -91,9 +91,8 @@ struct SimpleMeanNoReduceFixture : public MeanNoReduceFixture
BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture)
{
- RunTest<2, float>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
- {{ "outputTensor", { 1.5f } } });
+ RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
+ {{ "outputTensor", { 1.5f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp
index 802799c2b4..f7e2edd546 100644
--- a/src/armnnTfLiteParser/test/Multiplication.cpp
+++ b/src/armnnTfLiteParser/test/Multiplication.cpp
@@ -94,19 +94,18 @@ struct SimpleMultiplicationFixture : public MultiplicationFixture
BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
{
- RunTest<4, float>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f } },
- { "inputTensor2", { 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f,
- 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f} } },
- {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
- 15.0f, 20.0f, 25.0f,
- 6.0f, 7.0f, 8.0f,
- 45.0f, 50.0f, 55.0f } } });
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "inputTensor2", { 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f,
+ 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f} } },
+ {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
+ 15.0f, 20.0f, 25.0f,
+ 6.0f, 7.0f, 8.0f,
+ 45.0f, 50.0f, 55.0f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
index 09b744a7ce..bdc8478ca2 100644
--- a/src/armnnTfLiteParser/test/Pad.cpp
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -92,13 +92,13 @@ struct SimplePadFixture : public PadFixture
BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
{
- RunTest<2, float>(0,
- {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
- {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
+ RunTest<2, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
+ {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index b372a604f3..8d0ee01aa9 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -14,6 +14,7 @@
#include <armnn/TypesUtils.hpp>
#include "test/TensorHelpers.hpp"
+#include "TypeUtils.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <backendsCommon/BackendRegistry.hpp>
@@ -116,14 +117,18 @@ struct ParserFlatbuffersFixture
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
- const std::vector<DataType>& inputData,
- const std::vector<DataType>& expectedOutputData);
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData);
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
const std::map<std::string, std::vector<DataType>>& inputData,
const std::map<std::string, std::vector<DataType>>& expectedOutputData);
@@ -152,21 +157,24 @@ struct ParserFlatbuffersFixture
}
};
-template <std::size_t NumOutputDimensions, typename DataType>
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
const std::vector<DataType>& inputData,
const std::vector<DataType>& expectedOutputData)
{
- RunTest<NumOutputDimensions, DataType>(subgraphId,
- { { m_SingleInputName, inputData } },
- { { m_SingleOutputName, expectedOutputData } });
+ RunTest<NumOutputDimensions, ArmnnType>(subgraphId,
+ { { m_SingleInputName, inputData } },
+ { { m_SingleOutputName, expectedOutputData } });
}
-template <std::size_t NumOutputDimensions, typename DataType>
-void
-ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType>>& inputData,
- const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData)
{
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
@@ -175,7 +183,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : inputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
@@ -185,7 +193,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
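
The trait that maps the enum back to a storage type, armnn::ResolveType, comes from the newly included TypeUtils.hpp and its definition is not part of this diff. A minimal sketch of what such an enum-to-type mapping could look like; the name ResolveTypeImpl and the set of specialisations here are illustrative only, not the actual armnn definition:

    #include <armnn/Types.hpp>
    #include <cstdint>
    #include <type_traits>

    // Hypothetical enum-to-type trait, sketched for illustration; the real
    // armnn::ResolveType lives in TypeUtils.hpp.
    template<armnn::DataType DT> struct ResolveTypeImpl;
    template<> struct ResolveTypeImpl<armnn::DataType::Float32>         { using Type = float;        };
    template<> struct ResolveTypeImpl<armnn::DataType::QuantisedAsymm8> { using Type = std::uint8_t; };

    template<armnn::DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;

    // With such a trait, RunTest<2, armnn::DataType::Float32> defaults its DataType
    // template parameter to float, so the test vectors keep their old element types.
    static_assert(std::is_same<ResolveType<armnn::DataType::Float32>, float>::value,
                  "Float32 should resolve to float");

With the enum as the explicit template argument, VerifyTensorInfoDataType can check the parsed tensor's data type directly against ArmnnType, which is what replaces the removed GetDataType<T> lookup in the hunks above.
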
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index ae5a09a711..ef4b761945 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -86,9 +86,9 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({3,3})));
}
@@ -101,9 +101,9 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({1,9})));
}
@@ -116,9 +116,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,9})));
}
@@ -131,9 +131,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,3,3})));
}
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index 957e61b944..dacd946352 100644
--- a/src/armnnTfLiteParser/test/Softmax.cpp
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -71,8 +71,7 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
{
- RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 5ee74243c4..7f6fb276fc 100644
--- a/src/armnnTfLiteParser/test/Squeeze.cpp
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -85,7 +85,7 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
@@ -99,7 +99,7 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}