From 8ae17b3f69d43b6c85c073d6420441858f20afaa Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 17 Feb 2021 13:45:50 +0000
Subject: IVGCVSW-5394 TfLiteDelegate: Implement the Lstm operator

* Add LSTM operator

Signed-off-by: Mike Kelly
Signed-off-by: Sadik Armagan
Change-Id: If8c667685fa1176738ffe2e6d08b1c684e7ee6b2
---
 delegate/src/test/LstmTest.cpp       | 189 ++++++++++
 delegate/src/test/LstmTestHelper.hpp | 691 +++++++++++++++++++++++++++++++++++
 2 files changed, 880 insertions(+)
 create mode 100644 delegate/src/test/LstmTest.cpp
 create mode 100644 delegate/src/test/LstmTestHelper.hpp

diff --git a/delegate/src/test/LstmTest.cpp b/delegate/src/test/LstmTest.cpp
new file mode 100644
index 0000000000..1fa9f0c8bf
--- /dev/null
+++ b/delegate/src/test/LstmTest.cpp
@@ -0,0 +1,189 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LstmTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void LstmTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 2;
+    int32_t inputSize = 2;
+    int32_t outputSize = 4;
+    // cellSize and outputSize have the same size when there is no projection.
+    int32_t numUnits = outputSize;
+
+    std::vector<int32_t> inputShape {batchSize, inputSize};
+    std::vector<int32_t> cellStateInTensorInfo {batchSize, numUnits};
+    std::vector<int32_t> outputStateInTensorInfo {batchSize, outputSize};
+
+    std::vector<int32_t> scratchBufferTensorInfo {batchSize, numUnits * 4};
+    std::vector<int32_t> cellStateOutTensorInfo {batchSize, numUnits};
+    std::vector<int32_t> outputStateOutTensorInfo {batchSize, outputSize};
+    std::vector<int32_t> outputTensorInfo {batchSize, outputSize};
+
+    std::vector<int32_t> tensorInfo4 {numUnits};
+    std::vector<int32_t> tensorInfo8 {numUnits, 2};
+    std::vector<int32_t> tensorInfo16 {numUnits, 4};
+
+    // tensorInfo8
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights {-0.45018822f, -0.02338299f, -0.0870589f,
+                                            -0.34550029f, 0.04266912f, -0.15680569f,
+                                            -0.34856534f, 0.43890524f};
+
+    std::vector<float> inputToForgetWeights {0.09701663f, 0.20334584f, -0.50592935f,
+                                             -0.31343272f, -0.40032279f, 0.44781327f,
+                                             0.01387155f, -0.35593212f};
+
+    std::vector<float> inputToCellWeights {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
+                                           -0.20583314f, 0.44344562f, 0.22077113f,
+                                           -0.29909778f};
+
+    std::vector<float> inputToOutputWeights {-0.25065863f, -0.28290087f, 0.04613829f,
+                                             0.40525138f, 0.44272184f, 0.03897077f,
+                                             -0.1556896f, 0.19487578f};
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights {-0.0063535f, -0.2042388f, 0.31454784f,
+                                                -0.35746509f, 0.28902304f, 0.08183324f,
+                                                -0.16555229f, 0.02286911f, -0.13566875f,
+                                                0.03034258f, 0.48091322f, -0.12528998f,
+                                                0.24077177f, -0.51332325f, -0.33502164f,
+                                                0.10629296f};
+
+    std::vector<float> recurrentToForgetWeights {-0.48684245f, -0.06655136f, 0.42224967f,
+                                                 0.2112639f, 0.27654213f, 0.20864892f,
+                                                 -0.07646349f, 0.45877004f, 0.00141793f,
+                                                 -0.14609534f, 0.36447752f, 0.09196436f,
+                                                 0.28053468f, 0.01560611f, -0.20127171f,
+                                                 -0.01140004f};
+
+    std::vector<float> recurrentToCellWeights {-0.3407414f, 0.24443203f, -0.2078532f,
+                                               0.26320225f, 0.05695659f, -0.00123841f,
+                                               -0.4744786f, -0.35869038f, -0.06418842f,
+                                               -0.13502428f, -0.501764f, 0.22830659f,
+                                               -0.46367589f, 0.26016325f, -0.03894562f,
+                                               -0.16368064f};
+
+    std::vector<float> recurrentToOutputWeights {0.43385774f, -0.17194885f, 0.2718237f,
+                                                 0.09215671f, 0.24107647f, -0.39835793f,
+                                                 0.18212086f, 0.01301402f, 0.48572797f,
+                                                 -0.50656658f, 0.20047462f, -0.20607421f,
+                                                 -0.51818722f, -0.15390486f, 0.0468148f,
+                                                 0.39922136f};
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<float> cellToInputWeights {};
+    bool hasCellToForgetWeights = false;
+    std::vector<float> cellToForgetWeights {};
+    bool hasCellToOutputWeights = false;
+    std::vector<float> cellToOutputWeights {};
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias {0., 0., 0., 0.};
+    std::vector<float> forgetGateBias {1., 1., 1., 1.};
+    std::vector<float> cellBias {0., 0., 0., 0.};
+    std::vector<float> outputGateBias {0., 0., 0., 0.};
+
+    bool hasProjectionWeights = false;
+    std::vector<float> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues {2., 3., 3., 4.};
+    std::vector<float> expectedOutputValues {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
+                                             -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f};
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 0.f;
+    float clippingThresProj = 0.f;
+
+    LstmTestImpl<float>(backends,
+                        ::tflite::TensorType_FLOAT32,
+                        batchSize,
+                        inputSize,
+                        outputSize,
+                        numUnits,
+                        hasInputToInputWeights,
+                        inputToInputWeights,
+                        inputToForgetWeights,
+                        inputToCellWeights,
+                        inputToOutputWeights,
+                        hasRecurrentToInputWeights,
+                        recurrentToInputWeights,
+                        recurrentToForgetWeights,
+                        recurrentToCellWeights,
+                        recurrentToOutputWeights,
+                        hasCellToInputWeights,
+                        cellToInputWeights,
+                        hasCellToForgetWeights,
+                        cellToForgetWeights,
+                        hasCellToOutputWeights,
+                        cellToOutputWeights,
+                        hasInputGateBias,
+                        inputGateBias,
+                        forgetGateBias,
+                        cellBias,
+                        outputGateBias,
+                        hasProjectionWeights,
+                        projectionWeights,
+                        hasProjectionBias,
+                        projectionBias,
+                        hasInputLayerNormWeights,
+                        inputLayerNormWeights,
+                        hasForgetLayerNormWeights,
+                        forgetLayerNormWeights,
+                        hasCellLayerNormWeights,
+                        cellLayerNormWeights,
+                        hasOutputLayerNormWeights,
+                        outputLayerNormWeights,
+                        inputValues,
+                        expectedOutputValues,
+                        activationFunction,
+                        clippingThresCell,
+                        clippingThresProj);
+}
+
+TEST_SUITE("LstmTest_CpuRefTests")
+{
+
+TEST_CASE ("LstmTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    LstmTest(backends);
+}
+
+} //End of TEST_SUITE("LstmTest_CpuRefTests")
+
+TEST_SUITE("LstmTest_CpuAccTests")
+{
+
+TEST_CASE ("LstmTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    LstmTest(backends);
+}
+
+} //End of TEST_SUITE("LstmTest_CpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/LstmTestHelper.hpp b/delegate/src/test/LstmTestHelper.hpp
new file mode 100644
index 0000000000..36a606119a
--- /dev/null
+++ b/delegate/src/test/LstmTestHelper.hpp
@@ -0,0 +1,691 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <tensorflow/lite/c/common.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
+                                        int32_t batchSize,
+                                        int32_t inputSize,
+                                        int32_t outputSize,
+                                        int32_t numUnits,
+                                        bool hasInputToInputWeights,
+                                        const std::vector<T>& inputToInputWeights,
+                                        const std::vector<T>& inputToForgetWeights,
+                                        const std::vector<T>& inputToCellWeights,
+                                        const std::vector<T>& inputToOutputWeights,
+                                        bool hasRecurrentToInputWeights,
+                                        const std::vector<T>& recurrentToInputWeights,
+                                        const std::vector<T>& recurrentToForgetWeights,
+                                        const std::vector<T>& recurrentToCellWeights,
+                                        const std::vector<T>& recurrentToOutputWeights,
+                                        bool hasCellToInputWeights,
+                                        const std::vector<T>& cellToInputWeights,
+                                        bool hasCellToForgetWeights,
+                                        const std::vector<T>& cellToForgetWeights,
+                                        bool hasCellToOutputWeights,
+                                        const std::vector<T>& cellToOutputWeights,
+                                        bool hasInputGateBias,
+                                        const std::vector<T>& inputGateBias,
+                                        const std::vector<T>& forgetGateBias,
+                                        const std::vector<T>& cellBias,
+                                        const std::vector<T>& outputGateBias,
+                                        bool hasProjectionWeights,
+                                        const std::vector<T>& projectionWeights,
+                                        bool hasProjectionBias,
+                                        const std::vector<T>& projectionBias,
+                                        bool hasInputLayerNormWeights,
+                                        const std::vector<T>& inputLayerNormWeights,
+                                        bool hasForgetLayerNormWeights,
+                                        const std::vector<T>& forgetLayerNormWeights,
+                                        bool hasCellLayerNormWeights,
+                                        const std::vector<T>& cellLayerNormWeights,
+                                        bool hasOutputLayerNormWeights,
+                                        const std::vector<T>& outputLayerNormWeights,
+                                        tflite::ActivationFunctionType activationFunction,
+                                        float clippingThresCell,
+                                        float clippingThresProj,
+                                        float quantScale = 1.0f,
+                                        int quantOffset = 0,
+                                        float outputQuantScale = 2.0f,
+                                        int outputQuantOffset = 0)
+{
+    std::vector<int32_t> tensorInfo0 {};
+    std::vector<int32_t> tensorInfo4 {numUnits};
+    std::vector<int32_t> tensorInfo8 {numUnits, static_cast<int32_t>(2)};
+    std::vector<int32_t> tensorInfo16 {numUnits, static_cast<int32_t>(4)};
+
+    std::vector<int32_t> inputShape {batchSize, inputSize};
+    std::vector<int32_t> outputShape {batchSize, outputSize};
+
+    std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
+    std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
+
+    std::vector<int32_t> operatorInputs;
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto outputQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+
+    // buffers and tensors are pushed in lockstep, so an entry's buffer index
+    // (buffers.size() - 1) is also its tensor index within the subgraph.
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                           inputShape.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("input_0"),
+                                   quantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasInputToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
+                             sizeof(T) * inputToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                               tensorInfo8.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
+                         sizeof(T) * inputToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToForgetWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
+                         sizeof(T) * inputToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToCellWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
+                         sizeof(T) * inputToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToOutputWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasRecurrentToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
+                             sizeof(T) * recurrentToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                               tensorInfo16.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("recurrentToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(recurrentToForgetWeights.data()),
+                         sizeof(T) * recurrentToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToForgetWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(recurrentToCellWeights.data()),
+                         sizeof(T) * recurrentToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToCellWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(recurrentToOutputWeights.data()),
+                         sizeof(T) * recurrentToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToOutputWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasCellToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
+                             sizeof(T) * cellToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToForgetWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
+                             sizeof(T) * cellToForgetWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToForgetWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToOutputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
+                             sizeof(T) * cellToOutputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToOutputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasInputGateBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputGateBias.data()),
+                             sizeof(T) * inputGateBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputGateBias"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
+                         sizeof(T) * forgetGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("forgetGateBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(cellBias.data()),
+                         sizeof(T) * cellBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(outputGateBias.data()),
+                         sizeof(T) * outputGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputGateBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasProjectionWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionWeights.data()),
+                             sizeof(T) * projectionWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasProjectionBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionBias.data()),
+                             sizeof(T) * projectionBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionBias"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
+                                                                           outputStateInDimensions.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputStateInInfo"),
+                                   outputQuantizationParameters,
+                                   true));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
+                                                                           cellStateInDimensions.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellStateInInfo"),
+                                   outputQuantizationParameters,
+                                   true));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasInputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
+                             sizeof(T) * inputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasForgetLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
+                             sizeof(T) * forgetLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("forgetLayerNormWeights"),
+                                       outputQuantizationParameters));
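+        // As with the other optional LSTM inputs, the tensor index is only recorded when the
+        // weights are present; otherwise kTfLiteOptionalTensor marks the input as absent.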
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(cellLayerNormWeights.data()),
+                             sizeof(T) * cellLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasOutputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
+                             sizeof(T) * outputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("outputLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    int outputBufferId = buffers.size();
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                           outputShape.size()),
+                                   tensorType,
+                                   outputBufferId,
+                                   flatBufferBuilder.CreateString("output"),
+                                   outputQuantizationParameters));
+    std::vector<int32_t> operatorOutputs;
+    operatorOutputs.push_back(buffers.size() - 1);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_LSTMOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+        CreateLSTMOptions(flatBufferBuilder,
+                          activationFunction,
+                          clippingThresCell,
+                          clippingThresProj).Union();
+
+    flatbuffers::Offset<Operator> lstmOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       flatBufferBuilder.CreateVector(&lstmOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: LSTM Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        tflite::BuiltinOperator_LSTM);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void LstmTestImpl(std::vector<armnn::BackendId>& backends,
+                  tflite::TensorType tensorType,
+                  int32_t batchSize,
+                  int32_t inputSize,
+                  int32_t outputSize,
+                  int32_t numUnits,
+                  bool hasInputToInputWeights,
+                  const std::vector<T>& inputToInputWeights,
+                  const std::vector<T>& inputToForgetWeights,
+                  const std::vector<T>& inputToCellWeights,
+                  const std::vector<T>& inputToOutputWeights,
+                  bool hasRecurrentToInputWeights,
+                  const std::vector<T>& recurrentToInputWeights,
+                  const std::vector<T>& recurrentToForgetWeights,
+                  const std::vector<T>& recurrentToCellWeights,
+                  const std::vector<T>& recurrentToOutputWeights,
+                  bool hasCellToInputWeights,
+                  const std::vector<T>& cellToInputWeights,
+                  bool hasCellToForgetWeights,
+                  const std::vector<T>& cellToForgetWeights,
+                  bool hasCellToOutputWeights,
+                  const std::vector<T>& cellToOutputWeights,
+                  bool hasInputGateBias,
+                  const std::vector<T>& inputGateBias,
+                  const std::vector<T>& forgetGateBias,
+                  const std::vector<T>& cellBias,
+                  const std::vector<T>& outputGateBias,
+                  bool hasProjectionWeights,
+                  const std::vector<T>& projectionWeights,
+                  bool hasProjectionBias,
+                  const std::vector<T>& projectionBias,
+                  bool hasInputLayerNormWeights,
+                  const std::vector<T>& inputLayerNormWeights,
+                  bool hasForgetLayerNormWeights,
+                  const std::vector<T>& forgetLayerNormWeights,
+                  bool hasCellLayerNormWeights,
+                  const std::vector<T>& cellLayerNormWeights,
+                  bool hasOutputLayerNormWeights,
+                  const std::vector<T>& outputLayerNormWeights,
+                  std::vector<T>& inputValues,
+                  std::vector<T>& expectedOutputValues,
+                  tflite::ActivationFunctionType activationFunction,
+                  float clippingThresCell,
+                  float clippingThresProj)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateLstmTfLiteModel(tensorType,
+                                                          batchSize,
+                                                          inputSize,
+                                                          outputSize,
+                                                          numUnits,
+                                                          hasInputToInputWeights,
+                                                          inputToInputWeights,
+                                                          inputToForgetWeights,
+                                                          inputToCellWeights,
+                                                          inputToOutputWeights,
+                                                          hasRecurrentToInputWeights,
+                                                          recurrentToInputWeights,
+                                                          recurrentToForgetWeights,
+                                                          recurrentToCellWeights,
+                                                          recurrentToOutputWeights,
+                                                          hasCellToInputWeights,
+                                                          cellToInputWeights,
+                                                          hasCellToForgetWeights,
+                                                          cellToForgetWeights,
+                                                          hasCellToOutputWeights,
+                                                          cellToOutputWeights,
+                                                          hasInputGateBias,
+                                                          inputGateBias,
+                                                          forgetGateBias,
+                                                          cellBias,
+                                                          outputGateBias,
+                                                          hasProjectionWeights,
+                                                          projectionWeights,
+                                                          hasProjectionBias,
+                                                          projectionBias,
+                                                          hasInputLayerNormWeights,
+                                                          inputLayerNormWeights,
+                                                          hasForgetLayerNormWeights,
+                                                          forgetLayerNormWeights,
+                                                          hasCellLayerNormWeights,
+                                                          cellLayerNormWeights,
+                                                          hasOutputLayerNormWeights,
+                                                          outputLayerNormWeights,
+                                                          activationFunction,
+                                                          clippingThresCell,
+                                                          clippingThresProj);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+} // anonymous namespace
\ No newline at end of file
--
cgit v1.2.1