author    Finn Williams <Finn.Williams@arm.com>    2021-03-01 12:31:41 +0000
committer Finn Williams <Finn.Williams@arm.com>    2021-03-25 16:54:05 +0000
commit    d4fa5456ba596c9fc5e2ab7de836c5157aa7a8f8 (patch)
tree      d43fbd37b45dc39cda46e8138be0def3d67d41ac /src/armnnTfLiteParser/test
parent    ade8c1daab87abf628c42a0bbc002193d7ac40f6 (diff)
download  armnn-d4fa5456ba596c9fc5e2ab7de836c5157aa7a8f8.tar.gz
IVGCVSW-5741 Update FullyConnected in TfLiteParser to support NonConstWeights
!armnn:5180

 * Remove unnecessary memcopy for non-permuted const tensors

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: Idc3ce2ac001e7d6be61819279de486f093730383
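With this change, a FULLY_CONNECTED operator whose weights tensor is not constant is parsed with the weights wired up as an additional network input, so weight data is bound at inference time like any other input. A minimal usage sketch of that idea from the caller's side (illustrative only; the model file name, shapes and values are assumptions taken from the test fixture below, not part of this patch, and exact API details may vary between ArmNN versions):

// Sketch: driving a parsed FullyConnected model whose "weights" tensor is a
// network input rather than a constant. File name and data are hypothetical.
#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <vector>

int main()
{
    using namespace armnn;

    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    INetworkPtr network = parser->CreateNetworkFromBinaryFile("fc_non_const_weights.tflite");

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Both the data tensor and the weights tensor are network inputs now.
    auto inputBinding   = parser->GetNetworkInputBindingInfo(0, "input_0");
    auto weightsBinding = parser->GetNetworkInputBindingInfo(0, "weights");
    auto outputBinding  = parser->GetNetworkOutputBindingInfo(0, "output");

    std::vector<int8_t> inputData   = { 1, 2, 3, 4 };
    std::vector<int8_t> weightsData = { 2, 3, 4, 5 };   // free to change between inferences
    std::vector<int8_t> outputData(1);

    InputTensors inputs
    {
        { inputBinding.first,   ConstTensor(inputBinding.second,   inputData.data())   },
        { weightsBinding.first, ConstTensor(weightsBinding.second, weightsData.data()) }
    };
    OutputTensors outputs
    {
        { outputBinding.first, Tensor(outputBinding.second, outputData.data()) }
    };

    runtime->EnqueueWorkload(netId, inputs, outputs);
    return 0;
}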
Diffstat (limited to 'src/armnnTfLiteParser/test')
-rw-r--r--  src/armnnTfLiteParser/test/FullyConnected.cpp | 177
1 file changed, 171 insertions(+), 6 deletions(-)
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index e7aa9082e2..333e17fafd 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -8,18 +8,17 @@
#include "../TfLiteParser.hpp"
#include <string>
-#include <iostream>
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
struct FullyConnectedFixture : public ParserFlatbuffersFixture
{
explicit FullyConnectedFixture(const std::string& inputShape,
- const std::string& outputShape,
- const std::string& filterShape,
- const std::string& filterData,
- const std::string biasShape = "",
- const std::string biasData = "")
+ const std::string& outputShape,
+ const std::string& filterShape,
+ const std::string& filterData,
+ const std::string biasShape = "",
+ const std::string biasData = "")
{
std::string inputTensors = "[ 0, 2 ]";
std::string biasTensor = "";
@@ -195,4 +194,170 @@ BOOST_FIXTURE_TEST_CASE(
true);
}
+
+struct FullyConnectedNonConstWeightsFixture : public ParserFlatbuffersFixture
+{
+ explicit FullyConnectedNonConstWeightsFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& filterShape,
+ const std::string biasShape = "")
+ {
+ std::string inputTensors = "[ 0, 1 ]";
+ std::string biasTensor = "";
+ std::string biasBuffer = "";
+ std::string outputs = "2";
+ if (biasShape.size() > 0)
+ {
+ inputTensors = "[ 0, 1, 2 ]";
+ biasTensor = R"(
+ {
+ "shape": )" + biasShape + R"(,
+ "type": "INT32",
+ "buffer": 2,
+ "name": "bias",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": true
+ }, )";
+
+ biasBuffer = R"(,{ "data": [ 10, 0, 0, 0 ] } )";
+ outputs = "3";
+ }
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "builtin_code": "FULLY_CONNECTED",
+ "version": 1
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "INT8",
+ "buffer": 0,
+ "name": "input_0",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": )" + filterShape + R"(,
+ "type": "INT8",
+ "buffer": 1,
+ "name": "weights",
+ "quantization": {
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": true
+ },
+ )" + biasTensor + R"(
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "INT8",
+ "buffer": 0,
+ "name": "output",
+ "quantization": {
+ "scale": [
+ 2.0
+ ],
+ "zero_point": [
+ 0
+ ],
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ }
+ ],
+ "inputs": )" + inputTensors + R"(,
+ "outputs": [ )" + outputs + R"( ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": )" + inputTensors + R"(,
+ "outputs": [ )" + outputs + R"( ],
+ "builtin_options_type": "FullyConnectedOptions",
+ "builtin_options": {
+ "fused_activation_function": "NONE",
+ "weights_format": "DEFAULT",
+ "keep_num_dims": false,
+ "asymmetric_quantize_inputs": false
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ]
+ }
+ ],
+ "description": "ArmnnDelegate: FullyConnected Operator Model",
+ "buffers": [
+ {
+ "data": []
+ },
+ {
+ "data": [ 2, 3, 4, 5 ]
+ }
+ )" + biasBuffer + R"(
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct FullyConnectedNonConstWeights : FullyConnectedNonConstWeightsFixture
+{
+ FullyConnectedNonConstWeights()
+ : FullyConnectedNonConstWeightsFixture("[ 1, 4, 1, 1 ]", // inputShape
+ "[ 1, 1 ]", // outputShape
+ "[ 1, 4 ]", // filterShape
+ "[ 1 ]" ) // biasShape
+
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeights, FullyConnectedNonConstWeights)
+{
+ RunTest<2, armnn::DataType::QAsymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmS8>(
+ 0,
+ {{{"input_0", { 1, 2, 3, 4 }},{"weights", { 2, 3, 4, 5 }}}},
+ {{"bias", { 10 }}},
+ {{"output", { 25 }}});
+}
+
+struct FullyConnectedNonConstWeightsNoBias : FullyConnectedNonConstWeightsFixture
+{
+ FullyConnectedNonConstWeightsNoBias()
+ : FullyConnectedNonConstWeightsFixture("[ 1, 4, 1, 1 ]", // inputShape
+ "[ 1, 1 ]", // outputShape
+ "[ 1, 4 ]") // filterShape
+
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeightsNoBias, FullyConnectedNonConstWeightsNoBias)
+{
+ RunTest<2, armnn::DataType::QAsymmS8,
+ armnn::DataType::QAsymmS8>(
+ 0,
+ {{{"input_0", { 1, 2, 3, 4 }},{"weights", { 2, 3, 4, 5 }}}},
+ {{"output", { 20 }}});
+}
+
BOOST_AUTO_TEST_SUITE_END()
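For reference, the expected outputs in the two new test cases follow from the usual quantized FullyConnected arithmetic: an int32 accumulation of the input/weights dot product plus the bias, requantised by the output scale. The input, weight and bias scales are all 1.0 and every zero point is 0, so only the output scale of 2.0 matters. A minimal sketch of that reference computation (the helper is made up for illustration and is not part of the test code):

// Reference arithmetic behind the expected outputs in the two tests above.
#include <cstddef>
#include <cstdint>
#include <vector>

int8_t QuantizedFullyConnectedRef(const std::vector<int8_t>& input,
                                  const std::vector<int8_t>& weights,
                                  int32_t bias,       // 0 when no bias tensor is supplied
                                  float outputScale)  // 2.0 in both fixtures
{
    // Accumulate in int32, as a quantized FullyConnected kernel would.
    int32_t acc = bias;
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        acc += static_cast<int32_t>(input[i]) * static_cast<int32_t>(weights[i]);
    }
    // Requantise to the output scale (all other scales are 1.0, zero points 0).
    return static_cast<int8_t>(acc / outputScale);
}

// QuantizedFullyConnectedRef({1,2,3,4}, {2,3,4,5}, 10, 2.0f) == 25   // with bias: (40 + 10) / 2
// QuantizedFullyConnectedRef({1,2,3,4}, {2,3,4,5},  0, 2.0f) == 20   // no bias:    40       / 2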