path: root/src/armnnTfLiteParser
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-09-14 16:12:44 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-09-15 10:19:00 +0100
commit    16f82f987b44b090a01807a2c79ed7fcc6bf80ea (patch)
tree      5e26fccece92956c19e14d0d5c106e5d38ea4576 /src/armnnTfLiteParser
parent    919c14ef132986aa1514b2070ce6d19b5579a6ab (diff)
download  armnn-16f82f987b44b090a01807a2c79ed7fcc6bf80ea.tar.gz
IVGCVSW-5305 AddBroadcastReshapeLayer as optimizer
* Remove AddBroadcastReshapeLayer from TfLiteParser
* Add AddBroadcastReshapeLayer as optimizer
* AddBroadcastReshapeLayer optimizer unit tests
* Load-scope dynamic tensor broadcasting unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I3549e85b71b41cbd4d96c0f1ece7887acbca76d1
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp               | 124
-rw-r--r--  src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp | 205
2 files changed, 213 insertions, 116 deletions
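The parser helper removed below (and, per the commit message, the graph optimizer that replaces it) inserts a Reshape in front of the lower-rank input of a broadcastable elementwise layer so that both inputs end up with the same rank. The target shape is built by right-aligning the smaller shape into a vector of 1s (see the std::copy_backward call in the removed code). The following is a minimal standalone sketch of that shape computation only; PadShapeToRank is a hypothetical helper for illustration and is not part of the ArmNN API or of this change.

    // Illustration only: pad a lower-rank shape with leading 1s so it matches
    // the rank of the other input, as the broadcast reshape does.
    // Assumes shape.size() <= targetRank.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    std::vector<uint32_t> PadShapeToRank(const std::vector<uint32_t>& shape, std::size_t targetRank)
    {
        // Start from a shape of all 1s at the target rank, then right-align the
        // original dimensions into it: [3, 2] with targetRank 4 -> [1, 1, 3, 2].
        std::vector<uint32_t> padded(targetRank, 1);
        std::copy_backward(shape.begin(), shape.end(), padded.end());
        return padded;
    }

    int main()
    {
        for (uint32_t d : PadShapeToRank({3, 2}, 4))
        {
            std::cout << d << ' ';   // prints: 1 1 3 2
        }
        std::cout << '\n';
        return 0;
    }

With the reshape handled by the optimizer, the parser now registers both input slots directly in every case, as the ParseMaximum, ParseMinimum, ParseSub, ParseDiv, ParseAdd and ParseMul hunks below show.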
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 109c2c2be1..6143f4af6a 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -443,7 +443,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
}
}
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
@@ -609,69 +609,6 @@ void TfLiteParser::ResetParser()
m_SubgraphConnections.clear();
}
-void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
- size_t operatorIndex,
- IConnectableLayer *layer)
-{
- CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- ARMNN_ASSERT(layer != nullptr);
-
- const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
- const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
-
- ARMNN_ASSERT(operatorPtr->inputs.size() > 1);
-
- uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
- TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
- uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
- TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
-
- armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
- armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
-
- uint32_t inputSlotId = 1;
- uint32_t reshapeSlotId = 0;
-
- if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
- {
- uint32_t id = reshapedInputId;
- reshapedInputId = inputId;
- inputId = id;
-
- reshapedTensorInfo = ToTensorInfo(tensorPtr1);
- inputTensorInfo = ToTensorInfo(tensorPtr);
-
- inputSlotId = 0;
- reshapeSlotId = 1;
- }
-
- uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
-
- std::vector<unsigned> reshapedDim;
- for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
- {
- reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
- }
-
- std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
- std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
-
- reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
-
- std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
- armnn::ReshapeDescriptor desc;
- desc.m_TargetShape = reshapedTensorInfo.GetShape();
- armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
-
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
- reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(reshapeSlotId));
-
- RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
-
- armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(inputSlotId));
- RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
-}
-
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
ResetParser();
@@ -995,7 +932,7 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
// Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
-
+
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);
@@ -1353,14 +1290,7 @@ void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
@@ -1390,14 +1320,7 @@ void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
@@ -1768,14 +1691,7 @@ void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
@@ -1807,15 +1723,7 @@ void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
-
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1846,15 +1754,7 @@ void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
-
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1885,15 +1785,7 @@ void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
- {
- AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
- }
- else
- {
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
- }
-
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
diff --git a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
index c4f0db7f49..89a6640e41 100644
--- a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
+++ b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
@@ -171,4 +171,209 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor2, LoadScopeDynamicTensor2Fixture)
true);
}
+struct LoadScopeDynamicTensorBroadcastingFixture : public ParserFlatbuffersFixture
+{
+ explicit LoadScopeDynamicTensorBroadcastingFixture(const std::string& inputShape0,
+ const std::string& inputShape1,
+ const std::string& inputShape2,
+ const std::string& addShape,
+ const std::string& outputShape)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "builtin_code": "ADD",
+ "version": 1
+ },
+ {
+ "builtin_code": "SUB",
+ "version": 1
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )" + inputShape0 + R"(,
+ "type": "FLOAT32",
+ "buffer": 1,
+ "name": "input0",
+ "quantization": {
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": )" + inputShape1 + R"(,
+ "type": "FLOAT32",
+ "buffer": 2,
+ "name": "input1",
+ "quantization": {
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 5,
+ "name": "output",
+ "quantization": {
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+
+ {
+ "shape": )" + addShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 4,
+ "name": "model/add/add",
+ "quantization": {
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": )" + inputShape2 + R"(,
+ "type": "FLOAT32",
+ "buffer": 3,
+ "name": "input2",
+ "quantization": {
+ "details_type": 0,
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ ],
+ "inputs": [
+ 0,
+ 1,
+ 4
+ ],
+ "outputs": [
+ 2
+ ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [
+ 0,
+ 1
+ ],
+ "outputs": [
+ 3
+ ],
+ "builtin_options_type": "AddOptions",
+ "builtin_options": {
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ },
+ {
+ "opcode_index": 1,
+ "inputs": [
+ 3,
+ 4
+ ],
+ "outputs": [
+ 2
+ ],
+ "builtin_options_type": "SubOptions",
+ "builtin_options": {
+ "fused_activation_function": "NONE"
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ "name": "main"
+ }
+ ],
+ "buffers": [
+ {
+ },
+ {
+ },
+ {
+ },
+ {
+ },
+ {
+ },
+ {
+ }
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct LoadScopeDynamicTensorBroadcasting3DFixture : LoadScopeDynamicTensorBroadcastingFixture
+{
+ LoadScopeDynamicTensorBroadcasting3DFixture() : LoadScopeDynamicTensorBroadcastingFixture("[ 1, 2, 3, 2 ]",
+ "[ 2, 3, 2 ]",
+ "[ 2, 3, 2 ]",
+ "[ 1, 2, 3, 2 ]", "[]") {}
+};
+
+struct LoadScopeDynamicTensorBroadcasting2DFixture : LoadScopeDynamicTensorBroadcastingFixture
+{
+ LoadScopeDynamicTensorBroadcasting2DFixture() : LoadScopeDynamicTensorBroadcastingFixture("[ 1, 2, 3, 2 ]",
+ "[ 3, 2 ]",
+ "[ 3, 2 ]",
+ "[]", "[]") {}
+};
+
+struct LoadScopeDynamicTensorBroadcasting1DFixture : LoadScopeDynamicTensorBroadcastingFixture
+{
+ LoadScopeDynamicTensorBroadcasting1DFixture() : LoadScopeDynamicTensorBroadcastingFixture("[ 1, 2, 3, 2 ]",
+ "[ 1 ]",
+ "[ 1 ]",
+ "[]",
+ "[ 1, 2, 3, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting3D, LoadScopeDynamicTensorBroadcasting3DFixture)
+{
+ RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ 0,
+ { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f }},
+ {"input1", { 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f }},
+ {"input2", { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f }}
+ },
+ { {"output", { 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f }} },
+ true);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting2D, LoadScopeDynamicTensorBroadcasting2DFixture)
+{
+ RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ 0,
+ { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f }},
+ {"input1", { 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }},
+ {"input2", { -1.f, -2.f, 3.f, 4.f, 5.f, 6.f }}
+ },
+ { {"output", { 4.f, 7.f, 4.f, 5.f, 6.f, 7.f, 10.f, 13.f, 10.f, 11.f, 12.f, 13.f }} },
+ true);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting1D, LoadScopeDynamicTensorBroadcasting1DFixture)
+{
+ RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ 0,
+ { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f }},
+ {"input1", { 5.f }},
+ {"input2", { 1.f }}
+ },
+ { {"output", { 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f }} },
+ true);
+}
+
BOOST_AUTO_TEST_SUITE_END()
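For reference, the expected outputs of the 1D broadcasting test above follow from broadcasting the single-element ADD and SUB operands across every element of input0: output[i] = input0[i] + input1[0] - input2[0] = input0[i] + 5 - 1. A standalone sketch of that arithmetic (illustration only, not part of the change or of the test code):

    // Reproduce the expected values of LoadScopeDynamicTensorBroadcasting1D.
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<float> input0 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f,
                                      6.f, 7.f, 8.f, 9.f, 10.f, 11.f };
        float input1 = 5.f;   // broadcast ADD operand ([ 1 ] tensor)
        float input2 = 1.f;   // broadcast SUB operand ([ 1 ] tensor)

        for (float v : input0)
        {
            std::cout << (v + input1 - input2) << ' ';   // prints: 4 5 6 ... 15
        }
        std::cout << '\n';
        return 0;
    }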