path: root/src/armnnTfLiteParser/test
Diffstat (limited to 'src/armnnTfLiteParser/test')
-rw-r--r--  src/armnnTfLiteParser/test/AvgPool2D.cpp                 119
-rw-r--r--  src/armnnTfLiteParser/test/Conv2D.cpp                    351
-rw-r--r--  src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp    199
-rw-r--r--  src/armnnTfLiteParser/test/GetBuffer.cpp                 126
-rw-r--r--  src/armnnTfLiteParser/test/GetInputsOutputs.cpp          239
-rw-r--r--  src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp  230
-rw-r--r--  src/armnnTfLiteParser/test/GetTensorIds.cpp              162
-rw-r--r--  src/armnnTfLiteParser/test/InputOutputTensorNames.cpp    138
-rw-r--r--  src/armnnTfLiteParser/test/LoadModel.cpp                 241
-rw-r--r--  src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp      61
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp  229
-rw-r--r--  src/armnnTfLiteParser/test/Softmax.cpp                   78
-rw-r--r--  src/armnnTfLiteParser/test/Squeeze.cpp                   144
13 files changed, 2317 insertions, 0 deletions
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
new file mode 100644
index 0000000000..ba6d2ae40a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "ParserFlatbuffersFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct AvgPool2DFixture : public ParserFlatbuffersFixture
+{
+ explicit AvgPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )"
+ + outputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": )"
+ + inputdim
+ + R"(,
+ "type": )"
+ + dataType
+ + R"(,
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ }
+ ],
+ "description": "AvgPool2D test.",
+ "buffers" : [ {}, {} ]
+ })";
+
+ SetupSingleInputSingleOutput("InputTensor", "OutputTensor");
+ }
+};
+
+
+struct AvgPoolLiteFixtureUint1DOutput : AvgPool2DFixture
+{
+ AvgPoolLiteFixtureUint1DOutput() : AvgPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "UINT8") {}
+};
+
+struct AvgPoolLiteFixtureFloat1DOutput : AvgPool2DFixture
+{
+ AvgPoolLiteFixtureFloat1DOutput() : AvgPool2DFixture("[ 1, 2, 2, 1 ]", "[ 1, 1, 1, 1 ]", "FLOAT32") {}
+};
+
+struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
+{
+ AvgPoolLiteFixture2DOutput() : AvgPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
+{
+    RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 3 });
+}
+
+BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
+{
+ RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
+{
+ RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
+{
+    BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 3 })), armnn::Exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
new file mode 100644
index 0000000000..8a17dec47a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -0,0 +1,351 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+#include <sstream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SimpleConv2DFixture : public ParserFlatbuffersFixture
+{
+ explicit SimpleConv2DFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
+{
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+ },
+        // the output scale is 2.0 (input and filter scales are 1.0), so the expected value is half the accumulated sum
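+        // a sketch of the usual TFLite affine requantization, assuming all zero points are 0:
+        //   q_out = acc * (inputScale * filterScale) / outputScale = acc * (1.0 * 1.0) / 2.0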
+ {
+ (1*2 + 2*1 + 3*0 +
+ 4*6 + 5*2 + 6*1 +
+ 7*4 + 8*1 + 9*2) /2
+ });
+}
+
+struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
+{
+ explicit Conv2DWithBiasesFixture(const std::string & inputShape,
+ const std::string & outputShape,
+ const std::string & filterShape,
+ const std::string & filterData,
+ const std::string & biasShape,
+ const std::string & biasData,
+ const std::string & strides,
+ const std::string & activation="NONE",
+ const std::string & filterScale="1.0",
+ const std::string & filterZeroPoint="0",
+ const std::string & outputScale="2.0",
+ const std::string & outputZeroPoint="0")
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ )" + outputScale + R"( ],
+ "zero_point": [ )" + outputZeroPoint + R"( ],
+ }
+ },
+ {
+ "shape": )" + filterShape + R"( ,
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + filterScale + R"( ],
+ "zero_point": [ )" + filterZeroPoint + R"( ],
+ }
+ },
+ {
+ "shape": )" + biasShape + R"( ,
+ "type": "INT32",
+ "buffer": 3,
+ "name": "biasTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2, 3 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": )" + strides + R"(,
+ "stride_h": )" + strides + R"(,
+ "fused_activation_function": )" + activation + R"(
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + filterData + R"(, },
+ { "data": )" + biasData + R"(, },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ SimpleConv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 10, 0, 0, 0 ]", // biasData
+ "1") // stride w and h
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
+{
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 3, 4,
+ },
+    // the output scale is 2.0, so the expected values are half of the accumulated sums (bias included)
+ {
+ (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
+ (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
+ (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
+ (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
+ });
+}
+
+struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
+{
+ static std::string GenerateInts(unsigned int n)
+ {
+ std::stringstream ss;
+ ss << " [ ";
+        for (unsigned int i = 0; i < n; ++i)
+        {
+            if (i > 0)
+ {
+ ss << " , ";
+ }
+ ss << " " << (i%256);
+ }
+ ss << " ] ";
+ return ss.str();
+ }
+
+ Conv2DShapeTestFixture()
+ : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]", // inputShape
+ "[ 1, 112, 112, 32 ]", // outputShape
+ "[ 32, 3, 3, 3 ]", // filterShape
+ GenerateInts(32*3*3*3), // filterData
+ "[ 32 ]", // biasShape
+ GenerateInts(32*4), // biasData
+ "2") // stride w and h
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
+{
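+    // intentionally empty: the fixture constructor already parses, optimizes and loads the network,
+    // so successfully constructing the fixture for these larger shapes is the test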
+}
+
+struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ ReluConv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 16, 0, 0, 0 ]", // biasData
+ "1", // stride w and h
+ "RELU", // activation
+ "1.0", // filter scale
+ "4", // filter zero point
+ "2.0", // output scale
+ "20") // output zero point
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
+{
+ uint8_t bias = 16;
+ uint8_t outZero = 20;
+ uint8_t fz = 4; // filter zero point
+
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 4, 8,
+ },
+        // factors to consider:
+        // - the filter zero point is non-zero, hence the (x - fz) terms
+        // - the output scale is 2, hence the division by 2
+        // - the output zero point is non-zero, hence the + outZero
+        // - RELU clamps negative results to zero, which maps to outZero once the output zero point is added
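+        // worked example for the first output element (fz = 4, bias = 16, outZero = 20):
+        //   (1*(2-4) + 2*(1-4) + 4*(0-4) + 8*(6-4) + 16) / 2 + 20 = (-8 + 16) / 2 + 20 = 24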
+ {
+ std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+ std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
+ });
+}
+
+struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+ Relu6Conv2DWithBiasesFixture()
+ : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 2, 2, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 2,1, 0,6 ]", // filterData
+ "[ 1 ]", // biasShape
+ "[ 0, 0, 0, 0 ]", // biasData
+ "1", // stride w and h
+ "RELU6", // activation
+ "1.0", // filter scale
+ "0", // filter zero point
+ "2.0", // output scale
+ "0") // output zero point
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
+{
+    uint8_t relu6Min = 6 / 2; // the RELU6 cap of 6.0 expressed in quantized space (divided by the output scale)
+
+ RunTest<4, uint8_t>(
+ 0,
+ {
+ 1, 2,
+ 4, 1,
+ },
+        // factors to consider:
+        // - the output scale is 2, hence the division by 2
+        // - RELU6 clamps the output at 6.0, i.e. at relu6Min = 3 in quantized space
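+        // e.g. the first output element: (1*2 + 2*1 + 4*0 + 1*6) / 2 = 5, clamped to relu6Min = 3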
+ {
+ std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
+ std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
new file mode 100644
index 0000000000..4a06418095
--- /dev/null
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
+{
+ explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& filterShape,
+ const std::string& filterData,
+ const std::string& strides,
+ const std::string& paddingType,
+ const std::string biasShape = "",
+ const std::string biasData = "")
+ {
+ std::string inputTensors = "[ 0, 2 ]";
+ std::string biasTensor = "";
+ std::string biasBuffer = "";
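+        // when a bias shape and data are supplied, the generated model gains a third operator input
+        // (the bias tensor) and a matching data buffer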
+ if (biasShape.size() > 0 && biasData.size() > 0)
+ {
+ inputTensors = "[ 0, 2, 3 ]";
+ biasTensor = R"(
+ {
+ "shape": )" + biasShape + R"( ,
+ "type": "INT32",
+ "buffer": 3,
+ "name": "biasTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ } )";
+ biasBuffer = R"(
+ { "data": )" + biasData + R"(, }, )";
+ }
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + filterShape + R"(,
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }, )" + biasTensor + R"(
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": )" + inputTensors + R"(,
+ "outputs": [ 1 ],
+ "builtin_options_type": "DepthwiseConv2DOptions",
+ "builtin_options": {
+ "padding": ")" + paddingType + R"(",
+ "stride_w": )" + strides+ R"(,
+ "stride_h": )" + strides+ R"(,
+ "depth_multiplier": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + filterData + R"(, }, )"
+ + biasBuffer + R"(
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dSameFixture()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "SAME") // padding type
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // the expected values were generated using the example python implementation at
+ // https://eli.thegreenplace.net/2018/depthwise-separable-convolutions-for-machine-learning/
+ // divide the expected values by the output scale, as it is not 1.0
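+        // e.g. the centre output element: 0*9 + 1*8 + 2*7 + 3*6 + 4*5 + 5*4 + 6*3 + 7*2 + 8*1 = 120, then 120/2 = 60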
+ { 14/2, 35/2, 38/2,
+ 57/2, 120/2, 111/2,
+ 110/2, 197/2, 158/2 });
+}
+
+struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dValidFixture ()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 1, 1, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "VALID") // padding type
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // divide the expected values by the output scale, as it is not 1.0
+ { 120/2 });
+}
+
+struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dSameBiasFixture()
+ : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 3, 3, 1 ]", // filterShape
+ "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
+ "1", // stride w and h
+ "SAME", // padding type
+ "[ 1 ]", // biasShape
+ "[ 10, 0, 0, 0 ]") // biasData
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
+{
+ RunTest<4, uint8_t>(
+ 0,
+ { 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 8 },
+ // divide the expected values by the output scale, as it is not 1.0
+ { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
+ ( 57+10)/2, (120+10)/2, (111+10)/2,
+ (110+10)/2, (197+10)/2, (158+10)/2 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
new file mode 100644
index 0000000000..7486f01b52
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+#include <sstream>
+
+using armnnTfLiteParser::TfLiteParser;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetBufferFixture : public ParserFlatbuffersFixture
+{
+ explicit GetBufferFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ }
+ )";
+ ReadStringToBinary();
+ }
+
+ void CheckBufferContents(const TfLiteParser::ModelPtr& model,
+ std::vector<int32_t> bufferValues, size_t bufferIndex)
+ {
+        for (size_t i = 0; i < bufferValues.size(); i++)
+ {
+ BOOST_CHECK_EQUAL(TfLiteParser::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+ }
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
+{
+    // Check that the buffer contents are correct
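+    // buffer index 2 holds the filter data declared in the fixture's JSON: [ 2,1,0, 6,2,1, 4,1,2 ]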
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> bufferValues = {2,1,0,6,2,1,4,1,2};
+ CheckBufferContents(model, bufferValues, 2);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
+{
+    // Check which of the fixture's buffers are empty and which are not
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 0)->data.empty());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 1)->data.empty());
+ BOOST_CHECK(!TfLiteParser::GetBuffer(model, 2)->data.empty());
+ BOOST_CHECK(TfLiteParser::GetBuffer(model, 3)->data.empty());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
+{
+    // Check that an exception is thrown when an invalid buffer index is used
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetBuffer(model, 4)->data.empty(), armnn::Exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
new file mode 100644
index 0000000000..2c12c1976a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -0,0 +1,239 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetInputsOutputsMainFixture : public ParserFlatbuffersFixture
+{
+ explicit GetInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ -1.2 ],
+ "max": [ 25.5 ],
+ "scale": [ 0.25 ],
+ "zero_point": [ 10 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test Subgraph Inputs Outputs",
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+};
+
+struct GetEmptyInputsOutputsFixture : GetInputsOutputsMainFixture
+{
+ GetEmptyInputsOutputsFixture() : GetInputsOutputsMainFixture("[ ]", "[ ]") {}
+};
+
+struct GetInputsOutputsFixture : GetInputsOutputsMainFixture
+{
+ GetInputsOutputsFixture() : GetInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(0, tensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(0, tensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 1, 0);
+ BOOST_CHECK_EQUAL(2, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
+ CheckTensors(tensors[1], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 2,
+ "filterTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 1, 0);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
+}
+
+BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 2, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 2, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
new file mode 100644
index 0000000000..7e6808d11e
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -0,0 +1,230 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+using TensorRawPtr = TfLiteParser::TensorRawPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetSubgraphInputsOutputsMainFixture : public ParserFlatbuffersFixture
+{
+ explicit GetSubgraphInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ -1.2 ],
+ "max": [ 25.5 ],
+ "scale": [ 0.25 ],
+ "zero_point": [ 10 ]
+ }
+ }
+ ],
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test Subgraph Inputs Outputs",
+ "buffers" : [
+ { },
+ { },
+ { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
+ { },
+ ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+};
+
+struct GetEmptySubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
+{
+ GetEmptySubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ ]", "[ ]") {}
+};
+
+struct GetSubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
+{
+ GetSubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 1);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
+ "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 1);
+ BOOST_CHECK_EQUAL(1, subgraphTensors.size());
+ BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
+ "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
+}
+
+BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(model, 2), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(model, 2), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
new file mode 100644
index 0000000000..2d123111d3
--- /dev/null
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct GetTensorIdsFixture : public ParserFlatbuffersFixture
+{
+ explicit GetTensorIdsFixture(const std::string& inputs, const std::string& outputs)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": )"
+ + inputs
+ + R"(,
+ "outputs": )"
+ + outputs
+ + R"(,
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ }
+ ],
+ "description": "Test loading a model",
+ "buffers" : [ {}, {} ]
+ })";
+
+ ReadStringToBinary();
+ }
+};
+
+struct GetEmptyTensorIdsFixture : GetTensorIdsFixture
+{
+ GetEmptyTensorIdsFixture() : GetTensorIdsFixture("[ ]", "[ ]") {}
+};
+
+struct GetInputOutputTensorIdsFixture : GetTensorIdsFixture
+{
+ GetInputOutputTensorIdsFixture() : GetTensorIdsFixture("[ 0, 1, 2 ]", "[ 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedIds = { };
+ std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedIds = { };
+ std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
+ std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ std::vector<int32_t> expectedOutputIds = { 3 };
+ std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+ BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end());
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+{
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubGraph, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubGraph, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
new file mode 100644
index 0000000000..fc88a4e58d
--- /dev/null
+++ b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
@@ -0,0 +1,138 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct EmptyNetworkFixture : public ParserFlatbuffersFixture
+{
+ explicit EmptyNetworkFixture() {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [],
+ "subgraphs": [ {} ]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(EmptyNetworkHasNoInputsAndOutputs, EmptyNetworkFixture)
+{
+ Setup();
+ BOOST_TEST(m_Parser->GetSubgraphCount() == 1);
+ BOOST_TEST(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
+ BOOST_TEST(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
+}
+
+struct MissingTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit MissingTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [],
+ "subgraphs": [{
+ "inputs" : [ 0, 1 ],
+ "outputs" : [ 2, 3 ],
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MissingTensorsThrowException, MissingTensorsFixture)
+{
+    // this throws because the parser cannot make the input/output tensor connections
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+struct InvalidTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit InvalidTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ ],
+ "subgraphs": [{
+ "tensors": [ {}, {}, {}, {} ],
+ "inputs" : [ 0, 1 ],
+ "outputs" : [ 2, 3 ],
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(InvalidTensorsThrowException, InvalidTensorsFixture)
+{
+    // this throws because the input/output tensor connections cannot be made from the empty tensor definitions
+ BOOST_CHECK_THROW(Setup(), armnn::InvalidArgumentException);
+}
+
+struct ValidTensorsFixture : public ParserFlatbuffersFixture
+{
+ explicit ValidTensorsFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
+ "subgraphs": [{
+ "tensors": [ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "FLOAT32",
+ "name": "In",
+ "buffer": 0,
+ }, {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "FLOAT32",
+ "name": "Out",
+ "buffer": 1,
+ }],
+ "inputs" : [ 0 ],
+ "outputs" : [ 1 ],
+ "operators": [{
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "filter_width": 1,
+ "filter_height": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }]
+ }]
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(GetValidInputOutputTensorNames, ValidTensorsFixture)
+{
+ Setup();
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
+ BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+}
+
+BOOST_FIXTURE_TEST_CASE(ThrowIfSubgraphIdInvalidForInOutNames, ValidTensorsFixture)
+{
+ Setup();
+
+ // these throw because of the invalid subgraph id
+ BOOST_CHECK_THROW(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
+ BOOST_CHECK_THROW(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
new file mode 100644
index 0000000000..a87eba83ac
--- /dev/null
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -0,0 +1,241 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <unistd.h>
+
+using armnnTfLiteParser::TfLiteParser;
+using ModelPtr = TfLiteParser::ModelPtr;
+using SubGraphPtr = TfLiteParser::SubGraphPtr;
+using OperatorPtr = TfLiteParser::OperatorPtr;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct LoadModelFixture : public ParserFlatbuffersFixture
+{
+ explicit LoadModelFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 1, 1, 1 ] ,
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "OutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ },
+ {
+ "shape": [ 1, 2, 2, 1 ] ,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "InputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ]
+ }
+ }
+ ],
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "operators": [ {
+ "opcode_index": 0,
+ "inputs": [ 1 ],
+ "outputs": [ 0 ],
+ "builtin_options_type": "Pool2DOptions",
+ "builtin_options":
+ {
+ "padding": "VALID",
+ "stride_w": 2,
+ "stride_h": 2,
+ "filter_width": 2,
+ "filter_height": 2,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ } ]
+ },
+ {
+ "tensors": [
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "ConvInputTensor",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 1, 1, 1 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "ConvOutputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 511.0 ],
+ "scale": [ 2.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 3, 3, 1 ],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 1,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "VALID",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ }
+ ],
+ "description": "Test loading a model",
+ "buffers" : [ {}, {} ]
+ })";
+
+ ReadStringToBinary();
+ }
+
+ void CheckModel(const ModelPtr& model, uint32_t version, size_t opcodeSize,
+ const std::vector<tflite::BuiltinOperator>& opcodes,
+ size_t subgraphs, const std::string desc, size_t buffers)
+ {
+ BOOST_CHECK(model);
+ BOOST_CHECK_EQUAL(version, model->version);
+ BOOST_CHECK_EQUAL(opcodeSize, model->operator_codes.size());
+ CheckBuiltinOperators(opcodes, model->operator_codes);
+ BOOST_CHECK_EQUAL(subgraphs, model->subgraphs.size());
+ BOOST_CHECK_EQUAL(desc, model->description);
+ BOOST_CHECK_EQUAL(buffers, model->buffers.size());
+ }
+
+ void CheckBuiltinOperators(const std::vector<tflite::BuiltinOperator>& expectedOperators,
+ const std::vector<std::unique_ptr<tflite::OperatorCodeT>>& result)
+ {
+ BOOST_CHECK_EQUAL(expectedOperators.size(), result.size());
+ for (size_t i = 0; i < expectedOperators.size(); i++)
+ {
+ BOOST_CHECK_EQUAL(expectedOperators[i], result[i]->builtin_code);
+ }
+ }
+
+ void CheckSubgraph(const SubGraphPtr& subgraph, size_t tensors, const std::vector<int32_t>& inputs,
+ const std::vector<int32_t>& outputs, size_t operators, const std::string& name)
+ {
+ BOOST_CHECK(subgraph);
+ BOOST_CHECK_EQUAL(tensors, subgraph->tensors.size());
+ BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
+ subgraph->outputs.begin(), subgraph->outputs.end());
+ BOOST_CHECK_EQUAL(operators, subgraph->operators.size());
+ BOOST_CHECK_EQUAL(name, subgraph->name);
+ }
+
+ void CheckOperator(const OperatorPtr& operatorPtr, uint32_t opcode, const std::vector<int32_t>& inputs,
+ const std::vector<int32_t>& outputs, tflite::BuiltinOptions optionType,
+ tflite::CustomOptionsFormat custom_options_format)
+ {
+ BOOST_CHECK(operatorPtr);
+ BOOST_CHECK_EQUAL(opcode, operatorPtr->opcode_index);
+ BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(),
+ operatorPtr->inputs.begin(), operatorPtr->inputs.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
+ operatorPtr->outputs.begin(), operatorPtr->outputs.end());
+ BOOST_CHECK_EQUAL(optionType, operatorPtr->builtin_options.type);
+ BOOST_CHECK_EQUAL(custom_options_format, operatorPtr->custom_options_format);
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
+{
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+ CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
+ 2, "Test loading a model", 2);
+ CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
+ CheckSubgraph(model->subgraphs[1], 3, { 0 }, { 1 }, 1, "");
+ CheckOperator(model->subgraphs[0]->operators[0], 0, { 1 }, { 0 }, tflite::BuiltinOptions_Pool2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ CheckOperator(model->subgraphs[1]->operators[0], 1, { 0, 2 }, { 1 }, tflite::BuiltinOptions_Conv2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
+{
+ std::string fname = boost::filesystem::temp_directory_path().string() + "/testtflite.tflite";
+ bool saved = flatbuffers::SaveFile(fname.c_str(),
+ reinterpret_cast<char *>(m_GraphBinary.data()),
+ m_GraphBinary.size(), true);
+ BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
+
+ TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile(fname.c_str());
+ CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
+ 2, "Test loading a model", 2);
+ CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
+ CheckSubgraph(model->subgraphs[1], 3, { 0 }, { 1 }, 1, "");
+ CheckOperator(model->subgraphs[0]->operators[0], 0, { 1 }, { 0 }, tflite::BuiltinOptions_Pool2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ CheckOperator(model->subgraphs[1]->operators[0], 1, { 0, 2 }, { 1 }, tflite::BuiltinOptions_Conv2DOptions,
+ tflite::CustomOptionsFormat_FLEXBUFFERS);
+ remove(fname.c_str());
+}
+
+BOOST_AUTO_TEST_CASE(LoadNullBinary)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
+{
+ std::string testData = "invalid data";
+    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(testData.data()),
+ testData.length()), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadFileNotFound)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+}
+
+BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
+{
+ BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
new file mode 100644
index 0000000000..590675b46c
--- /dev/null
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -0,0 +1,61 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "../TfLiteParser.hpp"
+#include <iostream>
+#include <string>
+
+struct TfLiteParserFixture
+{
+
+ armnnTfLiteParser::TfLiteParser m_Parser;
+ unsigned int m_InputShape[4];
+
+ TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {
+ m_Parser.Create();
+ }
+ ~TfLiteParserFixture() { }
+
+};
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser);
+
+
+BOOST_FIXTURE_TEST_CASE( EmptySqueezeDims_OutputWithAllDimensionsSqueezed, TfLiteParserFixture )
+{
+
+ std::vector<uint32_t> squeezeDims = { };
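+    // an empty squeeze-dims list removes every dimension of size 1, so { 1, 2, 2, 1 } becomes { 2, 2 }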
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 2);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
+};
+
+BOOST_FIXTURE_TEST_CASE( SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput, TfLiteParserFixture )
+{
+ std::vector<uint32_t> squeezeDims = { 1, 2 };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 4);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+};
+
+BOOST_FIXTURE_TEST_CASE( SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed, TfLiteParserFixture )
+{
+ std::vector<uint32_t> squeezeDims = { 1, 3 };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
+ BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
+ BOOST_TEST(outputTensorInfo.GetNumDimensions() == 3);
+ BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
+};
+
+BOOST_AUTO_TEST_SUITE_END();
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
new file mode 100644
index 0000000000..3687a6ed00
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -0,0 +1,229 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <boost/filesystem.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+#include <experimental/filesystem>
+#include <armnn/IRuntime.hpp>
+#include <armnn/TypesUtils.hpp>
+#include "test/TensorHelpers.hpp"
+
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+
+#include <schema_generated.h>
+#include <iostream>
+
+using armnnTfLiteParser::ITfLiteParser;
+using TensorRawPtr = const tflite::TensorT *;
+
+struct ParserFlatbuffersFixture
+{
+ ParserFlatbuffersFixture()
+ : m_Parser(ITfLiteParser::Create()), m_NetworkIdentifier(-1)
+ {
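+        // always test against the CPU reference backend; the NEON (CpuAcc) and OpenCL (GpuAcc)
+        // backends are added only when they were compiled in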
+ armnn::IRuntime::CreationOptions options;
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuRef));
+
+#if ARMCOMPUTENEON_ENABLED
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuAcc));
+#endif
+
+#if ARMCOMPUTECL_ENABLED
+ m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::GpuAcc));
+#endif
+ }
+
+ std::vector<uint8_t> m_GraphBinary;
+ std::string m_JsonString;
+ std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
+ std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
+ armnn::NetworkId m_NetworkIdentifier;
+
+    /// If SetupSingleInputSingleOutput() is called, these store the input and output tensor names
+    /// so they don't need to be passed to the single-input/single-output overload of RunTest().
+ std::string m_SingleInputName;
+ std::string m_SingleOutputName;
+
+ void Setup()
+ {
+ bool ok = ReadStringToBinary();
+ if (!ok) {
+ throw armnn::Exception("LoadNetwork failed while reading binary input");
+ }
+
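+        // for each configured backend: parse the flatbuffer into an INetwork, optimize it for that
+        // backend (with CpuRef as fallback), and load it into the corresponding runtime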
+ for (auto&& runtime : m_Runtimes)
+ {
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+
+ if (!network) {
+ throw armnn::Exception("The parser failed to create an ArmNN network");
+ }
+
+ auto optimized = Optimize(*network,
+ { runtime.second, armnn::Compute::CpuRef },
+ runtime.first->GetDeviceSpec());
+ std::string errorMessage;
+
+ armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier,
+ std::move(optimized),
+ errorMessage);
+
+ if (ret != armnn::Status::Success)
+ {
+ throw armnn::Exception(
+ boost::str(
+ boost::format("The runtime failed to load the network. "
+ "Error was: %1%. in %2% [%3%:%4%]") %
+ errorMessage %
+ __func__ %
+ __FILE__ %
+ __LINE__));
+ }
+ }
+ }
+
+ void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
+ {
+ // Store the input and output names so they don't need to be passed to the single-input-single-output RunTest().
+ m_SingleInputName = inputName;
+ m_SingleOutputName = outputName;
+ Setup();
+ }
+
+ bool ReadStringToBinary()
+ {
+ const char* schemafileName = getenv("ARMNN_TF_LITE_SCHEMA_PATH");
+ if (schemafileName == nullptr)
+ {
+ schemafileName = ARMNN_TF_LITE_SCHEMA_PATH;
+ }
+ std::string schemafile;
+
+ bool ok = flatbuffers::LoadFile(schemafileName, false, &schemafile);
+ BOOST_ASSERT_MSG(ok, "Couldn't load schema file; check ARMNN_TF_LITE_SCHEMA_PATH");
+ if (!ok)
+ {
+ return false;
+ }
+
+ // Parse the schema first, so we can use it to parse the JSON data afterwards.
+ flatbuffers::Parser parser;
+
+ ok &= parser.Parse(schemafile.c_str());
+ BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+
+ ok &= parser.Parse(m_JsonString.c_str());
+ BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+
+ if (!ok)
+ {
+ return false;
+ }
+
+ {
+ const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
+ size_t size = static_cast<size_t>(parser.builder_.GetSize());
+ m_GraphBinary.assign(bufferPtr, bufferPtr+size);
+ }
+ return ok;
+ }
+
+ /// Executes the network with the given input tensor and checks the result against the given output tensor.
+ /// This overload assumes the network has a single input and a single output.
+ template <std::size_t NumOutputDimensions, typename DataType>
+ void RunTest(size_t subgraphId,
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData);
+
+ /// Executes the network with the given input tensors and checks the results against the given output tensors.
+ /// This overload supports multiple inputs and multiple outputs, identified by name.
+ template <std::size_t NumOutputDimensions, typename DataType>
+ void RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+
+ void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
+ tflite::TensorType tensorType, uint32_t buffer, const std::string& name,
+ const std::vector<float>& min, const std::vector<float>& max,
+ const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
+ {
+ BOOST_CHECK(tensors);
+ BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
+ BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
+ BOOST_CHECK_EQUAL(tensorType, tensors->type);
+ BOOST_CHECK_EQUAL(buffer, tensors->buffer);
+ BOOST_CHECK_EQUAL(name, tensors->name);
+ BOOST_CHECK(tensors->quantization);
+ BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
+ tensors->quantization.get()->min.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
+ tensors->quantization.get()->max.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
+ tensors->quantization.get()->scale.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
+ tensors->quantization.get()->zero_point.begin(),
+ tensors->quantization.get()->zero_point.end());
+ }
+};
+
+template <std::size_t NumOutputDimensions, typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData)
+{
+ RunTest<NumOutputDimensions, DataType>(subgraphId,
+ { { m_SingleInputName, inputData } },
+ { { m_SingleOutputName, expectedOutputData } });
+}
+
+template <std::size_t NumOutputDimensions, typename DataType>
+void
+ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+{
+ for (auto&& runtime : m_Runtimes)
+ {
+ using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+ // Set up the armnn input tensors from the given vectors.
+ armnn::InputTensors inputTensors;
+ for (auto&& it : inputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ // Allocate storage for the output tensors to be written to, and set up the armnn output tensors.
+ std::map<std::string, boost::multi_array<DataType, NumOutputDimensions>> outputStorage;
+ armnn::OutputTensors outputTensors;
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
+ outputTensors.push_back(
+ { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+ }
+
+ runtime.first->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+ // Compare each output tensor to the expected values
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
+ auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
+ BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ }
+ }
+}
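None of the tests in this change call the name-keyed RunTest overload directly; the sketch below shows how a multi-input graph would typically be driven through it. The fixture name, tensor names and values are illustrative assumptions, not part of this change.

    // Illustration only: a hypothetical two-input fixture driven through the
    // map-based RunTest overload declared above. All names and values are assumed.
    BOOST_FIXTURE_TEST_CASE(RunTwoInputGraph, SomeTwoInputFixture)
    {
        RunTest<4, float>(0,
                          { { "inputA", { 1.0f, 2.0f } },
                            { "inputB", { 3.0f, 4.0f } } },
                          { { "outputTensor", { 4.0f, 6.0f } } });
    }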
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
new file mode 100644
index 0000000000..bb47738cf1
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SoftmaxFixture : public ParserFlatbuffersFixture
+{
+ explicit SoftmaxFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "SOFTMAX" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": [ 1, 7 ],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 1, 7 ],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 0.00390625 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "SoftmaxOptions",
+ "builtin_options": {
+ "beta": 1.0
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [ {}, {} ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
+{
+ RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
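The expected output of ParseSoftmaxLite follows from the declared quantization parameters: with input scale 1.0 and zero point 0, the dequantized input (0, 0, 100, 0, 0, 0, 0) puts essentially all of the softmax probability mass on the third element, and re-quantizing a value of about 1.0 with the output scale 0.00390625 (1/256) and zero point 0 gives round(1.0 / 0.00390625) = 256, which saturates to the uint8 maximum of 255; the remaining elements quantize to 0. A small sketch of that re-quantization step, for illustration only (not the runtime's code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantizes a float with the affine scheme used in the test's tensor
    // metadata; shown here only to explain how the expected 255 arises.
    uint8_t QuantizeToUint8(float value, float scale, int32_t zeroPoint)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    // QuantizeToUint8(1.0f, 0.00390625f, 0) == 255 (256 clamped to 255).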
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
new file mode 100644
index 0000000000..a8c99793ad
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SqueezeFixture : public ParserFlatbuffersFixture
+{
+ explicit SqueezeFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& squeezeDims)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "SQUEEZE" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {)";
+ m_JsonString += R"(
+ "shape" : )" + inputShape + ",";
+ m_JsonString += R"(
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {)";
+ m_JsonString += R"(
+ "shape" : )" + outputShape;
+ m_JsonString += R"(,
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "SqueezeOptions",
+ "builtin_options": {)";
+ if (!squeezeDims.empty())
+ {
+ m_JsonString += R"("squeeze_dims" : )" + squeezeDims;
+ }
+ m_JsonString += R"(},
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [ {}, {} ]
+ }
+ )";
+ }
+};
+
+struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2, 1 ]", "[ 0, 1, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
+{
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ == armnn::TensorShape({2,2,1})));
+}
+
+struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithoutSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2 ]", "") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
+{
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ == armnn::TensorShape({2,2})));
+}
+
+struct SqueezeFixtureWithInvalidInput : SqueezeFixture
+{
+ SqueezeFixtureWithInvalidInput() : SqueezeFixture("[ 1, 2, 2, 1, 2 ]", "[ 1, 2, 2, 1 ]", "[ ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidInput, SqueezeFixtureWithInvalidInput)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
+ armnn::InvalidArgumentException);
+}
+
+struct SqueezeFixtureWithSqueezeDimsSizeInvalid : SqueezeFixture
+{
+ SqueezeFixtureWithSqueezeDimsSizeInvalid() : SqueezeFixture("[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 2, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidSqueezeDims, SqueezeFixtureWithSqueezeDimsSizeInvalid)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+}
+
+struct SqueezeFixtureWithNegativeSqueezeDims : SqueezeFixture
+{
+ SqueezeFixtureWithNegativeSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]",
+ "[ 1, 2, 2, 1 ]",
+ "[ -2 , 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSqueezeNegativeSqueezeDims, SqueezeFixtureWithNegativeSqueezeDims)
+{
+ BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
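Taken together, the error-path fixtures above document the validation the parser is expected to perform for SQUEEZE: an input tensor with more than four dimensions is rejected with armnn::InvalidArgumentException, while a squeeze_dims list with more than four entries or with negative indices is rejected with armnn::ParseException. The checks below are a condensed sketch consistent with those expectations, not the parser's actual code; the function name, bounds check on the index range, and messages are illustrative.

    #include <cstdint>
    #include <vector>
    #include <armnn/Exceptions.hpp>

    // Illustration only: validation consistent with the Squeeze error tests.
    void ValidateSqueeze(uint32_t inputNumDimensions, const std::vector<int32_t>& squeezeDims)
    {
        if (inputNumDimensions > 4)
        {
            throw armnn::InvalidArgumentException("Squeeze: input tensors with more than 4 dimensions are not supported");
        }
        if (squeezeDims.size() > 4)
        {
            throw armnn::ParseException("Squeeze: squeeze_dims may not contain more than 4 entries");
        }
        for (int32_t dim : squeezeDims)
        {
            if (dim < 0 || static_cast<uint32_t>(dim) >= inputNumDimensions)
            {
                throw armnn::ParseException("Squeeze: squeeze_dims entries must be valid dimension indices");
            }
        }
    }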