author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2021-08-12 14:48:15 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2021-08-31 14:13:48 +0000
commit    7684b18e8fec45355a49e7f7165c582efc553ab6 (patch)
tree      f79078cec5e14534b82effaa6f8c4b70c3fe6bff /delegate
parent    b338fb08cc77892f9cc19132421db06d77f9afde (diff)
MLCE-530 Add support for UnidirectionalSequenceLstm to armnn delegate
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ib04f8d6b9e60a4204c56eba4c2ecd2b316509dcc
Diffstat (limited to 'delegate')
-rw-r--r--  delegate/CMakeLists.txt                                        2
-rw-r--r--  delegate/src/DelegateUtils.hpp                                16
-rw-r--r--  delegate/src/Lstm.hpp                                         82
-rw-r--r--  delegate/src/UnidirectionalSequenceLstm.hpp                  266
-rw-r--r--  delegate/src/armnn_delegate.cpp                                7
-rw-r--r--  delegate/src/test/TestUtils.cpp                                9
-rw-r--r--  delegate/src/test/TestUtils.hpp                                3
-rw-r--r--  delegate/src/test/UnidirectionalSequenceLstmTest.cpp        826
-rw-r--r--  delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp  722
9 files changed, 1884 insertions, 49 deletions
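For orientation, here is a minimal sketch (not part of this change) of how an application would route a model containing UNIDIRECTIONAL_SEQUENCE_LSTM through the Arm NN delegate. It assumes the public API from armnn_delegate.hpp and the TfLite C++ headers; the model file name is illustrative:

    // Load an illustrative TfLite model and build an interpreter for it.
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile("sequence_lstm.tflite");
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
    interpreter->AllocateTensors();

    // Create the Arm NN delegate for the reference backend and hand the graph to it;
    // supported operators, now including UnidirectionalSequenceLstm, run through Arm NN.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
    interpreter->Invoke();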
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index b43feb7f9c..effb093f4d 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -181,6 +181,8 @@ if(BUILD_UNIT_TESTS)
src/test/TestUtils.cpp
src/test/TransposeTest.cpp
src/test/TransposeTestHelper.hpp
+ src/test/UnidirectionalSequenceLstmTest.cpp
+ src/test/UnidirectionalSequenceLstmTestHelper.hpp
src/test/UnpackTest.cpp
src/test/UnpackTestHelper.hpp)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 2d1651842a..8c7ba25e15 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -506,6 +506,13 @@ armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
}
}
+// Wraps the constant data of the TfLite input tensor at the given index in a
+// heap-allocated armnn::ConstTensor for use in armnn::LstmInputParams.
+armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
+{
+    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
+    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
+    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
+}
+
void CalcPadding(uint32_t inputSize,
uint32_t filterSize,
uint32_t stride,
@@ -561,6 +568,15 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
return kTfLiteOk;
}
+// Note: despite its name, this returns true when the optional operand is omitted,
+// i.e. its tensor index is negative; callers therefore test !IsOptionalOperandPresent().
+bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
+{
+    return tfLiteNode->inputs->data[operandIndex] < 0;
+}
+
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
armnnDelegate::DelegateData& delegateData,
TfLiteContext* tfLiteContext,
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index 829e3bf9c6..8d719ee351 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -19,22 +19,6 @@
namespace armnnDelegate
{
-bool IsOptional(TfLiteNode* tfLiteNode, const int index)
-{
- if (tfLiteNode->inputs->data[index] < 0) {
- return true;
- }
- return false;
-
-}
-
-armnn::ConstTensor* CreateConstTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
-{
- const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
- armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
- return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
-}
-
TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
@@ -68,60 +52,60 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
// Set the params structure for the AddLstmLayer call
armnn::LstmInputParams params;
- if (!IsOptional(tfLiteNode, 1))
+ if (!IsOptionalOperandPresent(tfLiteNode, 1))
{
- params.m_InputToInputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 1);
+ params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
}
- params.m_InputToForgetWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 2);
- params.m_InputToCellWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 3);
- params.m_InputToOutputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 4);
+ params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
+ params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
+ params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
// Recurrent weight tensors of size {n_cell, n_output}
- if (!IsOptional(tfLiteNode, 5))
+ if (!IsOptionalOperandPresent(tfLiteNode, 5))
{
- params.m_RecurrentToInputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 5);
+ params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
}
- params.m_RecurrentToForgetWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 6);
- params.m_RecurrentToCellWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 7);
- params.m_RecurrentToOutputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 8);
+ params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
+ params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
+ params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
// Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
- if (!IsOptional(tfLiteNode, 9))
+ if (!IsOptionalOperandPresent(tfLiteNode, 9))
{
- params.m_CellToInputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 9);
+ params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
}
- if (!IsOptional(tfLiteNode, 10))
+ if (!IsOptionalOperandPresent(tfLiteNode, 10))
{
- params.m_CellToForgetWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 10);
+ params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
}
- if (!IsOptional(tfLiteNode, 11))
+ if (!IsOptionalOperandPresent(tfLiteNode, 11))
{
- params.m_CellToOutputWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 11);
+ params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
}
// Gates bias tensors of size {n_cell}
- if (!IsOptional(tfLiteNode, 12))
+ if (!IsOptionalOperandPresent(tfLiteNode, 12))
{
- params.m_InputGateBias = CreateConstTensor(tfLiteTensors, tfLiteNode, 12);
+ params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
}
- params.m_ForgetGateBias = CreateConstTensor(tfLiteTensors, tfLiteNode, 13);
- params.m_CellBias = CreateConstTensor(tfLiteTensors, tfLiteNode, 14);
- params.m_OutputGateBias = CreateConstTensor(tfLiteTensors, tfLiteNode, 15);
+ params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
+ params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
+ params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
// Projection weight tensor of size {n_output, n_cell}
- if (!IsOptional(tfLiteNode, 16))
+ if (!IsOptionalOperandPresent(tfLiteNode, 16))
{
- params.m_ProjectionWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 16);
+ params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
}
// Projection bias tensor of size {n_output}
- if (!IsOptional(tfLiteNode, 17))
+ if (!IsOptionalOperandPresent(tfLiteNode, 17))
{
- params.m_ProjectionBias = CreateConstTensor(tfLiteTensors, tfLiteNode, 17);
+ params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
}
// These state tensors are defined as variable tensors, and will be modified by this op.
@@ -129,24 +113,24 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
// Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
- if (tfLiteNode->inputs->size >= 21 && !IsOptional(tfLiteNode, 20))
+ if (tfLiteNode->inputs->size >= 21 && !IsOptionalOperandPresent(tfLiteNode, 20))
{
- params.m_InputLayerNormWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 20);
+ params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
}
- if (tfLiteNode->inputs->size >= 22 && !IsOptional(tfLiteNode, 21))
+ if (tfLiteNode->inputs->size >= 22 && !IsOptionalOperandPresent(tfLiteNode, 21))
{
- params.m_ForgetLayerNormWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 21);
+ params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
}
- if (tfLiteNode->inputs->size >= 23 && !IsOptional(tfLiteNode, 22))
+ if (tfLiteNode->inputs->size >= 23 && !IsOptionalOperandPresent(tfLiteNode, 22))
{
- params.m_CellLayerNormWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 22);
+ params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
}
- if (tfLiteNode->inputs->size >= 24 && !IsOptional(tfLiteNode, 23))
+ if (tfLiteNode->inputs->size >= 24 && !IsOptionalOperandPresent(tfLiteNode, 23))
{
- params.m_OutputLayerNormWeights = CreateConstTensor(tfLiteTensors, tfLiteNode, 23);
+ params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
}
// set the layer descriptor
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
new file mode 100644
index 0000000000..8aff39381e
--- /dev/null
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -0,0 +1,266 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DelegateUtils.hpp"
+
+#include <armnn/LstmParams.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const auto nodeParams = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams *>(tfLiteNode->builtin_data);
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
+ // Please refer to each operand at
+ // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
+ armnn::LstmInputParams params;
+
+ if (!IsOptionalOperandPresent(tfLiteNode, 1))
+ {
+ params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
+ }
+
+ params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
+ params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
+ params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
+
+ // Recurrent weight tensors of size {n_cell, n_output}
+ if (!IsOptionalOperandPresent(tfLiteNode, 5))
+ {
+ params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
+ }
+
+ params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
+ params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
+ params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
+
+ // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
+ if (!IsOptionalOperandPresent(tfLiteNode, 9))
+ {
+ params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
+ }
+
+ if (!IsOptionalOperandPresent(tfLiteNode, 10))
+ {
+ params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
+ }
+
+ if (!IsOptionalOperandPresent(tfLiteNode, 11))
+ {
+ params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
+ }
+
+ // Gates bias tensors of size {n_cell}
+ if (!IsOptionalOperandPresent(tfLiteNode, 12))
+ {
+ params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
+ }
+
+ params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
+ params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
+ params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
+
+ // Projection weight tensor of size {n_output, n_cell}
+ if (!IsOptionalOperandPresent(tfLiteNode, 16))
+ {
+ params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
+ }
+ // Projection bias tensor of size {n_output}
+ if (!IsOptionalOperandPresent(tfLiteNode, 17))
+ {
+ params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
+ }
+
+ // These state tensors are defined as variable tensors, and will be modified by this op.
+ armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[18]]);
+ armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
+
+ // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
+ if (tfLiteNode->inputs->size >= 21 && !IsOptionalOperandPresent(tfLiteNode, 20))
+ {
+ params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
+ }
+
+ if (tfLiteNode->inputs->size >= 22 && !IsOptionalOperandPresent(tfLiteNode, 21))
+ {
+ params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
+ }
+
+ if (tfLiteNode->inputs->size >= 23 && !IsOptionalOperandPresent(tfLiteNode, 22))
+ {
+ params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
+ }
+
+ if (tfLiteNode->inputs->size >= 24 && !IsOptionalOperandPresent(tfLiteNode, 23))
+ {
+ params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
+ }
+
+ // set the layer descriptor
+ armnn::UnidirectionalSequenceLstmDescriptor desc;
+ desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
+ desc.m_ClippingThresCell = nodeParams->cell_clip;
+ desc.m_ClippingThresProj = nodeParams->proj_clip;
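+    // CIFG (coupled input and forget gate) is inferred from the absence of the input gate tensors.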
+ desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
+ || params.m_RecurrentToInputWeights == nullptr
+ || params.m_InputGateBias == nullptr);
+ desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
+ desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+ desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
+ || params.m_ForgetLayerNormWeights != nullptr
+ || params.m_CellLayerNormWeights != nullptr
+ || params.m_OutputLayerNormWeights != nullptr);
+ desc.m_TimeMajor = nodeParams->time_major;
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ unsigned int batchSize = inputTensorInfo.GetShape()[0];
+ unsigned int outputSize = outputTensorInfo.GetShape()[2];
+ unsigned int numUnits = cellStateInInfo.GetShape()[1];
+
+ armnn::DataType dataType = inputTensorInfo.GetDataType();
+ float qScale = inputTensorInfo.GetQuantizationScale();
+ float qOffset = inputTensorInfo.GetQuantizationOffset();
+
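+    // The scratch buffer holds intermediate gate activations: 4 gates normally, 3 when CIFG removes the input gate.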
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
+ if (!desc.m_CifgEnabled)
+ {
+ scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
+ }
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
+
+ armnn::LstmInputParamsInfo paramsInfo;
+ paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
+ paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
+ paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
+ paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+ paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
+ paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+ paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
+ paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
+ paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
+
+ if (!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+ paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+ if (params.m_CellToInputWeights != nullptr)
+ {
+ paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+ }
+ paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+ }
+
+ if (desc.m_ProjectionEnabled)
+ {
+ paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+ if (params.m_ProjectionBias != nullptr)
+ {
+ paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+ }
+ }
+
+ if (desc.m_PeepholeEnabled)
+ {
+ paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+ paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+ }
+
+ if (desc.m_LayerNormEnabled)
+ {
+ if(!desc.m_CifgEnabled)
+ {
+ paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+ }
+ paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+ paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+ paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+ }
+
+    // hiddenStateOutput and cellStateOutput are not present in the TfLite UnidirectionalSequenceLstm operator
+ armnn::Optional<armnn::TensorInfo> optionalTensor;
+
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsUnidirectionalSequenceLstmSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo,
+ outputStateInInfo,
+ cellStateInInfo,
+ outputInfo,
+ optionalTensor,
+ optionalTensor,
+ desc,
+ paramsInfo);
+ };
+
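+    // With no network to build, the delegate is only being asked whether this operator is supported.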
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ ARMNN_ASSERT(layer != nullptr);
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Connect the inputs
+ // input_layer
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(0));
+ // cellStateIn
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[18]]->Connect(layer->GetInputSlot(1));
+    // outputStateIn
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[19]]->Connect(layer->GetInputSlot(2));
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
+ return kTfLiteOk;
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 0ac33808b0..d3ccecb444 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -36,6 +36,7 @@
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Transpose.hpp"
+#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"
#include <flatbuffers/flatbuffers.h>
@@ -890,6 +891,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinTanh);
+ case kTfLiteBuiltinUnidirectionalSequenceLstm:
+ return VisitUnidirectionalSequenceLstmOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinUnidirectionalSequenceLstm);
case kTfLiteBuiltinUnpack:
return VisitUnpackOperator(delegateData,
tfLiteContext,
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
index bbe89904eb..9dce4461da 100644
--- a/delegate/src/test/TestUtils.cpp
+++ b/delegate/src/test/TestUtils.cpp
@@ -34,6 +34,15 @@ void CompareData(float tensor1[], float tensor2[], size_t tensorSize)
}
}
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        // Pass when the absolute difference is within percentTolerance percent of the expected value.
+        CHECK(std::abs(tensor1[i] - tensor2[i]) <= std::abs(tensor1[i] * percentTolerance / 100));
+    }
+}
+
void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize)
{
uint8_t tolerance = 1;
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index 8a2756f4c5..5d4a0ed7d4 100644
--- a/delegate/src/test/TestUtils.hpp
+++ b/delegate/src/test/TestUtils.hpp
@@ -42,6 +42,9 @@ void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
/// Can be used to compare float data coming from a tflite interpreter with a tolerance of limit_of_float*100
void CompareData(float tensor1[], float tensor2[], size_t tensorSize);
+/// Can be used to compare float data coming from a tflite interpreter with a given percentage tolerance
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance);
+
/// Can be used to compare int8_t data coming from a tflite interpreter with a tolerance of 1
void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize);
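A minimal usage sketch of the new tolerance-based overload (values chosen for illustration):

    float expected[] = { 1.0f,   2.0f };
    float actual[]   = { 1.004f, 1.992f };
    // Passes: each element differs by no more than 1% of the expected value.
    armnnDelegate::CompareData(expected, actual, 2, 1.0f);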
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTest.cpp b/delegate/src/test/UnidirectionalSequenceLstmTest.cpp
new file mode 100644
index 0000000000..f0a96da57e
--- /dev/null
+++ b/delegate/src/test/UnidirectionalSequenceLstmTest.cpp
@@ -0,0 +1,826 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "UnidirectionalSequenceLstmTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
+{
+ int32_t batchSize = 3;
+ int32_t timeSize = 2;
+ int32_t inputSize = 3;
+ int32_t outputSize = 4;
+ // cellSize and outputSize have the same size when there is no projection.
+ int32_t numUnits = outputSize;
+
+ //tensorInfo12,
+ bool hasInputToInputWeights = true;
+ std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
+ -0.117484632f, 0.3298470976f, -0.1179017122f,
+ 0.214305695f, 0.42135173085f, 0.003878414626f,
+ -0.348303917f, -0.1881275477f, 0.0343011027f };
+
+ std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f };
+
+ std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f };
+
+ std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f };
+
+ //tensorInfo16,
+ bool hasRecurrentToInputWeights = true;
+ std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+ -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+ 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+ 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };
+
+ std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
+
+ std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
+
+ std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+ -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+ 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
+ // tensorInfo4
+ bool hasCellToInputWeights = false;
+ std::vector<float> cellToInputWeights;
+ bool hasCellToForgetWeights = false;
+ std::vector<float> cellToForgetWeights;
+ bool hasCellToOutputWeights = false;
+ std::vector<float> cellToOutputWeights;
+
+ bool hasInputGateBias = true;
+ std::vector<float> inputGateBias = {0., 0., 0., 0.};
+ std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+ std::vector<float> cellBias = {0., 0., 0., 0.};
+ std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+ bool hasProjectionWeights = false;
+ std::vector<float> projectionWeights;
+ bool hasProjectionBias = false;
+ std::vector<float> projectionBias;
+
+ bool hasInputLayerNormWeights = false;
+ std::vector<float> inputLayerNormWeights;
+ bool hasForgetLayerNormWeights = false;
+ std::vector<float> forgetLayerNormWeights;
+ bool hasCellLayerNormWeights = false;
+ std::vector<float> cellLayerNormWeights;
+ bool hasOutputLayerNormWeights = false;
+ std::vector<float> outputLayerNormWeights;
+
+ std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2. };
+ std::vector<float> expectedOutputValues = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
+ -0.168107f, -0.414129f, -0.549875f, -0.00803579f,
+ -0.0668735f, 0.204078f, -0.42765f, -0.0312321f,
+ -0.120003f, -0.0941918f, -0.456391f, -0.0287019f,
+ -0.0342921f, 0.20824f, -0.656989f, -0.00415265f,
+ -0.10493f, 0.14211f, -0.583478f, -0.0329754f };
+
+ tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+ float clippingThresCell = 10.f;
+ float clippingThresProj = 0.f;
+ bool isTimeMajor = false;
+
+ UnidirectionalSequenceLstmTestImpl<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ inputValues,
+ expectedOutputValues,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& backends)
+{
+ int32_t batchSize = 3;
+ int32_t timeSize = 2;
+ int32_t inputSize = 3;
+ int32_t outputSize = 4;
+ // cellSize and outputSize have the same size when there is no projection.
+ int32_t numUnits = outputSize;
+
+ std::vector<int32_t> inputShape = {timeSize, batchSize, inputSize};
+ std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
+ std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
+
+ std::vector<int32_t> outputTensorInfo = {timeSize, batchSize, outputSize};
+
+ //tensorInfo12
+ bool hasInputToInputWeights = true;
+ std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
+ 0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
+ 0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
+ -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };
+
+ std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
+ -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
+ -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
+ -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };
+
+ std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
+ 0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
+ 0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
+ -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };
+
+ std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
+ -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
+ 0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
+ -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };
+
+ //tensorInfo16
+ bool hasRecurrentToInputWeights = true;
+ std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
+ -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
+ -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
+ 0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };
+
+ std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
+ 0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
+ -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
+ 0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };
+
+ std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
+ -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
+ -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
+ -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };
+
+ std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
+ -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
+ 0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
+ 0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };
+ // tensorInfo4
+ bool hasCellToInputWeights = false;
+ std::vector<float> cellToInputWeights;
+ bool hasCellToForgetWeights = false;
+ std::vector<float> cellToForgetWeights;
+ bool hasCellToOutputWeights = false;
+ std::vector<float> cellToOutputWeights;
+
+ bool hasInputGateBias = true;
+ std::vector<float> inputGateBias = {0., 0., 0., 0.};
+ std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+ std::vector<float> cellBias = {0., 0., 0., 0.};
+ std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+ bool hasProjectionWeights = false;
+ std::vector<float> projectionWeights;
+ bool hasProjectionBias = false;
+ std::vector<float> projectionBias;
+
+ bool hasInputLayerNormWeights = false;
+ std::vector<float> inputLayerNormWeights;
+ bool hasForgetLayerNormWeights = false;
+ std::vector<float> forgetLayerNormWeights;
+ bool hasCellLayerNormWeights = false;
+ std::vector<float> cellLayerNormWeights;
+ bool hasOutputLayerNormWeights = false;
+ std::vector<float> outputLayerNormWeights;
+
+ std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2. };
+ std::vector<float> expectedOutputValues = { 0.135658f, 0.124673f, 0.021209f, -0.0530204f,
+ 0.106138f, 0.0404792f, 0.0151644f, -0.00675166f,
+ -0.0128514f, 0.0644884f, 0.0709072f, -0.0454045f,
+ 0.162886f, 0.166494f, 0.0277046f, -0.0369807f,
+ 0.111716f, 0.043119f, 0.0762981f, -0.0122854f,
+ 0.104397f, 0.2144f, 0.119192f, -0.0839058f };
+
+ tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+ float clippingThresCell = 10.f;
+ float clippingThresProj = 0.f;
+ bool isTimeMajor = true;
+
+ UnidirectionalSequenceLstmTestImpl<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ inputValues,
+ expectedOutputValues,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+ int32_t batchSize = 2;
+ int32_t timeSize = 3;
+ int32_t inputSize = 4;
+ int32_t outputSize = 5;
+ int32_t numUnits = 6;
+
+ std::vector<int32_t> inputShape = {batchSize, timeSize, inputSize};
+ std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
+ std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
+
+ std::vector<int32_t> outputTensorInfo = {batchSize, timeSize, outputSize};
+
+ //tensorInfoInputSize,
+ bool hasInputToInputWeights = true;
+ std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
+ -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
+ -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
+ -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
+ -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
+ -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };
+
+ std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
+ 0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
+ 0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
+ -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
+ -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
+ 0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
+
+ std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
+ -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
+ -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
+ -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
+ -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
+ 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };
+
+ std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
+ -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
+ -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
+ 0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
+ 0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
+ -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };
+
+ //tensorInfoOutputSize,
+ bool hasRecurrentToInputWeights = true;
+ std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
+ -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
+ -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
+ -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
+ 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
+ 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
+ -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
+ 0.14283475f, -0.07390571f };
+
+ std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
+ 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
+ -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
+ 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
+ 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
+ -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
+ -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
+ 0.061878487f, -0.04729229f };
+
+ std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
+ 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
+ 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
+ -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
+ 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
+ 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
+ -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
+ -0.019443132f, -0.030755889f };
+
+ std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
+ -0.045984812f,-0.01255415f, -0.0026479573f,
+ -0.08196161f, -0.054914974f, -0.0046604523f,
+ -0.029587349f, -0.044576716f, -0.07480124f,
+ -0.082868785f, 0.023254942f, 0.027502948f,
+ -0.0039728214f, -0.08683098f, -0.08116779f,
+ -0.014675607f, -0.037924774f, -0.023314456f,
+ -0.007401714f, -0.09255757f, 0.029460307f,
+ -0.08829125f, -0.005139627f, -0.08989442f,
+ -0.0555066f, 0.13596267f, 0.025062224f };
+ // tensorInfoNumUnits
+ bool hasCellToInputWeights = true;
+ std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
+ 0.018586371f, -0.037586458f, -0.15312155f };
+ bool hasCellToForgetWeights = true;
+ std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
+ -0.012770197f, 0.041331276f, -0.072311886f };
+ bool hasCellToOutputWeights = true;
+ std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
+ 0.002913762f, 0.17764764f, -0.5495371f };
+
+ bool hasInputGateBias = true;
+ std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
+ 0.10380666f, 0.053110216f, -0.06928846f };
+ std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
+ 0.23027696f, 0.11098921f, 0.08989442f };
+ std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
+ 0.033463873f, -0.1483596f, 0.029460307f };
+ std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
+ 0.12648113f, 0.027195795f, 0.35373217f };
+
+ bool hasProjectionWeights = true;
+ std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
+ 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
+ -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
+ -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
+ 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
+ 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };
+
+ bool hasProjectionBias = true;
+ std::vector<float> projectionBias(outputSize, 0.f);
+
+ bool hasInputLayerNormWeights = false;
+ std::vector<float> inputLayerNormWeights;
+ bool hasForgetLayerNormWeights = false;
+ std::vector<float> forgetLayerNormWeights;
+ bool hasCellLayerNormWeights = false;
+ std::vector<float> cellLayerNormWeights;
+ bool hasOutputLayerNormWeights = false;
+ std::vector<float> outputLayerNormWeights;
+
+ std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.,
+ 1., 2., 3., 4., 5., 4.};
+ std::vector<float> expectedOutputValues = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
+ -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
+ -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
+ 0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
+ -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
+ -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0126895f };
+
+ tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+ float clippingThresCell = 10.f;
+ float clippingThresProj = 0.f;
+ bool isTimeMajor = false;
+
+ UnidirectionalSequenceLstmTestImpl<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ inputValues,
+ expectedOutputValues,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+ int32_t batchSize = 3;
+ int32_t timeSize = 2;
+ int32_t inputSize = 3;
+ int32_t outputSize = 4;
+ // cellSize and outputSize have the same size when there is no projection.
+ int32_t numUnits = outputSize;
+
+ //tensorInfo12
+ bool hasInputToInputWeights = false;
+ std::vector<float> inputToInputWeights{};
+
+ std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f };
+
+ std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f };
+
+ std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f };
+
+ //tensorInfo16
+ bool hasRecurrentToInputWeights = false;
+ std::vector<float> recurrentToInputWeights{};
+
+ std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
+
+ std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
+
+ std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+ -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+ 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
+ // tensorInfo4
+ bool hasCellToInputWeights = false;
+ std::vector<float> cellToInputWeights;
+ bool hasCellToForgetWeights = true;
+ std::vector<float> cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
+ bool hasCellToOutputWeights = true;
+ std::vector<float> cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
+
+ bool hasInputGateBias = false;
+ std::vector<float> inputGateBias;
+ std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+ std::vector<float> cellBias = {0., 0., 0., 0.};
+ std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+ bool hasProjectionWeights = false;
+ std::vector<float> projectionWeights;
+ bool hasProjectionBias = false;
+ std::vector<float> projectionBias;
+
+ bool hasInputLayerNormWeights = false;
+ std::vector<float> inputLayerNormWeights;
+ bool hasForgetLayerNormWeights = false;
+ std::vector<float> forgetLayerNormWeights;
+ bool hasCellLayerNormWeights = false;
+ std::vector<float> cellLayerNormWeights;
+ bool hasOutputLayerNormWeights = false;
+ std::vector<float> outputLayerNormWeights;
+
+ std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2. };
+ std::vector<float> expectedOutputValues = { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
+ -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
+ -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
+ -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
+ -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
+ -0.031675f, 0.125987f, -0.526695f, -0.110093f };
+
+ tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+ float clippingThresCell = 10.f;
+ float clippingThresProj = 0.f;
+ bool isTimeMajor = false;
+
+ UnidirectionalSequenceLstmTestImpl<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ inputValues,
+ expectedOutputValues,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(
+ std::vector<armnn::BackendId>& backends)
+{
+ int32_t batchSize = 3;
+ int32_t timeSize = 2;
+ int32_t inputSize = 3;
+ int32_t outputSize = 4;
+ int32_t numUnits = 5;
+
+ //tensorInfo15
+ bool hasInputToInputWeights = true;
+ std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
+ -0.117484632f, 0.3298470976f, -0.1179017122f,
+ 0.214305695f, 0.42135173085f, 0.003878414626f,
+ -0.348303917f, -0.1881275477f, 0.0343011027f,
+ -0.38837709614f, -0.05636804124f, 0.4259087456f};
+
+ std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f,
+ 0.2726137042f, 0.09216640889f, -0.06551410215f};
+
+ std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f};
+
+ std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f};
+
+ //tensorInfo20
+ bool hasRecurrentToInputWeights = true;
+ std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+ -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+ 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+ 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
+ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
+
+ std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
+ 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
+
+ std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
+ 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
+
+ std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+ -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+ 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
+ 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
+ // tensorInfo5
+ bool hasCellToInputWeights = true;
+ std::vector<float> cellToInputWeights = { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
+ bool hasCellToForgetWeights = true;
+ std::vector<float> cellToForgetWeights = { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
+ bool hasCellToOutputWeights = true;
+ std::vector<float> cellToOutputWeights = { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
+
+ bool hasInputGateBias = true;
+ std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
+ std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
+ std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
+ std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
+
+ bool hasProjectionWeights = true;
+ std::vector<float> projectionWeights = { -0.1f, 0.2f, 0.01f, -0.2f,
+ 0.1f, 0.5f, 0.3f, 0.08f,
+ 0.07f, 0.2f, -0.4f, 0.2f,
+ 0.5f, -0.4f, 0.3f, -0.2f,
+ 0.3f, 0.08f, -0.07f, 0.2f}; //{outputSize, numUnits}
+ bool hasProjectionBias = true;
+    std::vector<float> projectionBias(outputSize, 0.f);
+
+ bool hasInputLayerNormWeights = true;
+ std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
+ bool hasForgetLayerNormWeights = true;
+ std::vector<float> forgetLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
+ bool hasCellLayerNormWeights = true;
+ std::vector<float> cellLayerNormWeights = { 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
+ bool hasOutputLayerNormWeights = true;
+ std::vector<float> outputLayerNormWeights = { 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
+
+ std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2. };
+ std::vector<float> expectedOutputValues = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
+ 0.11458f, 0.0407109f, 0.300327f, 0.174301f,
+ 0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
+ 0.108008f, 0.0386623f, 0.273471f, 0.167115f,
+ 0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
+ 0.106649f, 0.0276847f, 0.229863f, 0.166958f };
+
+ tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+ float clippingThresCell = 10.f;
+ float clippingThresProj = 0.f;
+ bool isTimeMajor = false;
+
+ UnidirectionalSequenceLstmTestImpl<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ inputValues,
+ expectedOutputValues,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor);
+}
+
+
+TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
+{
+
+TEST_CASE ("UnidirectionalSequenceLstmTest_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ UnidirectionalSequenceLstmTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ UnidirectionalSequenceLstmTimeMajorTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
+}
+
+} // End of TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
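Assuming the standard delegate test binary name from delegate/CMakeLists.txt, the new suite can be run in isolation with doctest's suite filter, e.g.:

    ./DelegateUnitTests --test-suite="UnidirectionalSequenceLstmTest_CpuRefTests"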
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
new file mode 100644
index 0000000000..9d6ef87e3f
--- /dev/null
+++ b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -0,0 +1,722 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <tensorflow/lite/c/common.h>
+
+#include <doctest/doctest.h>
+
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <armnn/Types.hpp>
+
+#include <initializer_list>
+#include <iterator>
+#include <vector>
+
+namespace
+{
+
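+// Builds, in memory, a flatbuffer TfLite model containing a single
+// UNIDIRECTIONAL_SEQUENCE_LSTM operator; each optional operand is either backed
+// by a constant buffer or skipped, depending on the corresponding has* flag.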
+template <typename T>
+std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType tensorType,
+ int32_t batchSize,
+ int32_t timeSize,
+ int32_t inputSize,
+ int32_t outputSize,
+ int32_t numUnits,
+ bool hasInputToInputWeights,
+ const std::vector<T>& inputToInputWeights,
+ const std::vector<T>& inputToForgetWeights,
+ const std::vector<T>& inputToCellWeights,
+ const std::vector<T>& inputToOutputWeights,
+ bool hasRecurrentToInputWeights,
+ const std::vector<T>& recurrentToInputWeights,
+ const std::vector<T>& recurrentToForgetWeights,
+ const std::vector<T>& recurrentToCellWeights,
+ const std::vector<T>& recurrentToOutputWeights,
+ bool hasCellToInputWeights,
+ const std::vector<T>& cellToInputWeights,
+ bool hasCellToForgetWeights,
+ const std::vector<T>& cellToForgetWeights,
+ bool hasCellToOutputWeights,
+ const std::vector<T>& cellToOutputWeights,
+ bool hasInputGateBias,
+ const std::vector<float>& inputGateBias,
+ const std::vector<float>& forgetGateBias,
+ const std::vector<float>& cellBias,
+ const std::vector<float>& outputGateBias,
+ bool hasProjectionWeights,
+ const std::vector<T>& projectionWeights,
+ bool hasProjectionBias,
+ const std::vector<float>& projectionBias,
+ bool hasInputLayerNormWeights,
+ const std::vector<float>& inputLayerNormWeights,
+ bool hasForgetLayerNormWeights,
+ const std::vector<float>& forgetLayerNormWeights,
+ bool hasCellLayerNormWeights,
+ const std::vector<float>& cellLayerNormWeights,
+ bool hasOutputLayerNormWeights,
+ const std::vector<float>& outputLayerNormWeights,
+ tflite::ActivationFunctionType activationFunction,
+ float clippingThresCell,
+ float clippingThresProj,
+ bool isTimeMajor,
+ float quantScale,
+ int quantOffset = 0)
+{
+
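+ // Common tensor shapes: gate weights are [numUnits, inputSize] or
+ // [numUnits, outputSize]; peephole, bias and layer-norm tensors are [numUnits].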
+ std::vector<int32_t> tensorInfoNumUnits{numUnits};
+ std::vector<int32_t> tensorInfoInputSize{numUnits, inputSize};
+ std::vector<int32_t> tensorInfoOutputSize{numUnits, outputSize};
+
+ std::vector<int32_t> inputShape;
+ std::vector<int32_t> outputShape;
+ if (isTimeMajor)
+ {
+ inputShape = {timeSize, batchSize, inputSize};
+ outputShape = {timeSize, batchSize, outputSize};
+ }
+ else
+ {
+ inputShape = {batchSize, timeSize, inputSize};
+ outputShape = {batchSize, timeSize, outputSize};
+ }
+ std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<int32_t> projectionWeightDimensions{outputSize, numUnits};
+ std::vector<int32_t> projectionBiasDimensions{outputSize};
+
+ std::vector<int> operatorInputs;
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ std::vector<flatbuffers::Offset<Tensor>> tensors;
+
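+ // Identity quantization (scale 1.0, zero point 0) for the float tensors;
+ // the weight tensors use the caller-supplied quantScale and quantOffset.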
+ auto quantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ 1.0f }),
+ flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+
+ auto weightQuantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
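+ // Buffers and tensors are appended in lockstep (one buffer per tensor),
+ // so buffers.size() - 1 also indexes the tensor just created.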
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+ inputShape.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("input_0")));
+ operatorInputs.push_back(buffers.size() - 1);
+
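+ // The input-gate weights are optional; they are absent when the network
+ // uses CIFG (coupled input and forget gates).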
+ if (hasInputToInputWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToInputWeights.data()),
+ sizeof(T) * inputToInputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+ tensorInfoInputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputToInputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToForgetWeights.data()),
+ sizeof(T) * inputToForgetWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+ tensorInfoInputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputToForgetWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToCellWeights.data()),
+ sizeof(T) * inputToCellWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+ tensorInfoInputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputToCellWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToOutputWeights.data()),
+ sizeof(T) * inputToOutputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+ tensorInfoInputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputToOutputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ if (hasRecurrentToInputWeights)
+ {
+ buffers.push_back(CreateBuffer(
+ flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
+ sizeof(T) * recurrentToInputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+ tensorInfoOutputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("recurrentToInputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToForgetWeights.data()),
+ sizeof(T) * recurrentToForgetWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+ tensorInfoOutputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("recurrentToForgetWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToCellWeights.data()),
+ sizeof(T) * recurrentToCellWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+ tensorInfoOutputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("recurrentToCellWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToOutputWeights.data()),
+ sizeof(T) * recurrentToOutputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+ tensorInfoOutputSize.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("recurrentToOutputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+
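+ // Optional peephole connections: cell-to-gate weight vectors of shape [numUnits].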
+ if (hasCellToInputWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
+ sizeof(T) * cellToInputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellToInputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasCellToForgetWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
+ sizeof(T) * cellToForgetWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellToForgetWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasCellToOutputWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
+ sizeof(T) * cellToOutputWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellToOutputWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasInputGateBias)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
+ sizeof(float) * inputGateBias.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputGateBias")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
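+ // The forget, cell and output gate biases are mandatory, so no has* flag guards them.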
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(forgetGateBias.data()),
+ sizeof(float) * forgetGateBias.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("forgetGateBias")));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellBias.data()),
+ sizeof(float) * cellBias.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellBias")));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(outputGateBias.data()),
+ sizeof(float) * outputGateBias.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("outputGateBias")));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ if (hasProjectionWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionWeights.data()),
+ sizeof(T) * projectionWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
+ projectionWeightDimensions.size()),
+ tensorType,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("projectionWeights"),
+ weightQuantizationParameters));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasProjectionBias)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionBias.data()),
+ sizeof(float) * projectionBias.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
+ projectionBiasDimensions.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("projectionBias")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
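+ // The output-state and cell-state inputs are created as variable tensors
+ // (is_variable = true) so the interpreter treats them as persistent state.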
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
+ outputStateInDimensions.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("outputStateInInfo"),
+ quantizationParameters,
+ true));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
+ cellStateInDimensions.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellStateInInfo"),
+ quantizationParameters,
+ true));
+ operatorInputs.push_back(buffers.size() - 1);
+
+ if (hasInputLayerNormWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t *>(inputLayerNormWeights.data()),
+ sizeof(float) * inputLayerNormWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("inputLayerNormWeights")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasForgetLayerNormWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t *>(forgetLayerNormWeights.data()),
+ sizeof(float) * forgetLayerNormWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("forgetLayerNormWeights")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasCellLayerNormWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellLayerNormWeights.data()),
+ sizeof(float) * cellLayerNormWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("cellLayerNormWeights")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
+
+ if (hasOutputLayerNormWeights)
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t *>(outputLayerNormWeights.data()),
+ sizeof(float) * outputLayerNormWeights.size())));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+ tensorInfoNumUnits.size()),
+ ::tflite::TensorType_FLOAT32,
+ buffers.size() - 1,
+ flatBufferBuilder.CreateString("outputLayerNormWeights")));
+ operatorInputs.push_back(buffers.size() - 1);
+ }
+ else
+ {
+ operatorInputs.push_back(kTfLiteOptionalTensor);
+ }
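+
+ // Output tensor: [timeSize, batchSize, outputSize] when time-major,
+ // otherwise [batchSize, timeSize, outputSize].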
+ int outputBufferId = buffers.size();
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ tensors.push_back(CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+ outputShape.size()),
+ ::tflite::TensorType_FLOAT32,
+ outputBufferId,
+ flatBufferBuilder.CreateString("output")));
+ std::vector<int> operatorOutputs;
+ operatorOutputs.push_back(buffers.size() - 1);
+
+ // Create the operator
+ tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
+ flatbuffers::Offset<void> operatorBuiltinOptions =
+ CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor).Union();
+
+ flatbuffers::Offset<Operator> lstmOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+ flatbuffers::Offset<SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ flatBufferBuilder.CreateVector(&lstmOperator, 1));
+
+ flatbuffers::Offset<flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
+ flatbuffers::Offset<OperatorCode> operatorCode =
+ CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
+
+ flatbuffers::Offset<Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
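+// Builds the model above and runs it on the reference TfLite interpreter and on an
+// interpreter using the ArmNN delegate, comparing both results with expectedOutputValues.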
+template <typename T>
+void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
+ tflite::TensorType tensorType,
+ int32_t batchSize,
+ int32_t timeSize,
+ int32_t inputSize,
+ int32_t outputSize,
+ int32_t numUnits,
+ bool hasInputToInputWeights,
+ const std::vector<T>& inputToInputWeights,
+ const std::vector<T>& inputToForgetWeights,
+ const std::vector<T>& inputToCellWeights,
+ const std::vector<T>& inputToOutputWeights,
+ bool hasRecurrentToInputWeights,
+ const std::vector<T>& recurrentToInputWeights,
+ const std::vector<T>& recurrentToForgetWeights,
+ const std::vector<T>& recurrentToCellWeights,
+ const std::vector<T>& recurrentToOutputWeights,
+ bool hasCellToInputWeights,
+ const std::vector<T>& cellToInputWeights,
+ bool hasCellToForgetWeights,
+ const std::vector<T>& cellToForgetWeights,
+ bool hasCellToOutputWeights,
+ const std::vector<T>& cellToOutputWeights,
+ bool hasInputGateBias,
+ const std::vector<float>& inputGateBias,
+ const std::vector<float>& forgetGateBias,
+ const std::vector<float>& cellBias,
+ const std::vector<float>& outputGateBias,
+ bool hasProjectionWeights,
+ const std::vector<T>& projectionWeights,
+ bool hasProjectionBias,
+ const std::vector<float>& projectionBias,
+ bool hasInputLayerNormWeights,
+ const std::vector<float>& inputLayerNormWeights,
+ bool hasForgetLayerNormWeights,
+ const std::vector<float>& forgetLayerNormWeights,
+ bool hasCellLayerNormWeights,
+ const std::vector<float>& cellLayerNormWeights,
+ bool hasOutputLayerNormWeights,
+ const std::vector<float>& outputLayerNormWeights,
+ std::vector<float>& inputValues,
+ std::vector<float>& expectedOutputValues,
+ tflite::ActivationFunctionType activationFunction,
+ float clippingThresCell,
+ float clippingThresProj,
+ bool isTimeMajor,
+ float quantScale = 0.1f)
+{
+ using namespace tflite;
+
+ std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
+ batchSize,
+ timeSize,
+ inputSize,
+ outputSize,
+ numUnits,
+ hasInputToInputWeights,
+ inputToInputWeights,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ hasRecurrentToInputWeights,
+ recurrentToInputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ hasCellToInputWeights,
+ cellToInputWeights,
+ hasCellToForgetWeights,
+ cellToForgetWeights,
+ hasCellToOutputWeights,
+ cellToOutputWeights,
+ hasInputGateBias,
+ inputGateBias,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ hasProjectionWeights,
+ projectionWeights,
+ hasProjectionBias,
+ projectionBias,
+ hasInputLayerNormWeights,
+ inputLayerNormWeights,
+ hasForgetLayerNormWeights,
+ forgetLayerNormWeights,
+ hasCellLayerNormWeights,
+ cellLayerNormWeights,
+ hasOutputLayerNormWeights,
+ outputLayerNormWeights,
+ activationFunction,
+ clippingThresCell,
+ clippingThresProj,
+ isTimeMajor,
+ quantScale);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+ auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ tfLiteDelegateInputData[i] = inputValues[i];
+ }
+
+ auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ armnnDelegateInputData[i] = inputValues[i];
+ }
+
+ // Run inference with both interpreters
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+ if (tensorType == ::tflite::TensorType_INT8)
+ {
+ // Allow 2% tolerance for quantized weights
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+ expectedOutputValues.size(), 2);
+ armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
+ expectedOutputValues.size(), 2);
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData,
+ expectedOutputValues.size(), 2);
+ }
+ else
+ {
+ armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+ }
+}
+
+} // anonymous namespace
\ No newline at end of file