author    Sadik Armagan <sadik.armagan@arm.com>  2020-05-26 10:41:54 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2020-05-26 10:41:54 +0100
commit    6a903a78a637f240a5a5a13fffa36fd0cfbdcf7d (patch)
tree      33aeeb92207bbde28a8a5747bdcc33c82846494f
parent    a0301f1677d171e6135bae725f8be821e89b176f (diff)
download  android-nn-driver-6a903a78a637f240a5a5a13fffa36fd0cfbdcf7d.tar.gz
IVGCVSW-4850 Create QLSTM unit test in android-nn-driver
* Added QLSTM unit tests

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ibb7587d8a4fae4a630e7e80f4c3ce830665a7c77
-rw-r--r--  test/1.3/QLstm.cpp         | 879
-rw-r--r--  test/Android.mk            |  97
-rw-r--r--  test/DriverTestHelpers.cpp |  65
-rw-r--r--  test/DriverTestHelpers.hpp | 361
-rw-r--r--  test/Lstm.hpp              |   2
5 files changed, 1296 insertions(+), 108 deletions(-)
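
All of the new test vectors are quantized: each int8/int16 entry encodes real = scale * (q - zeroPoint) under the operand's quantization parameters (the input scale of 0.0078125 used below, for instance, is 1/128). A minimal sketch of that mapping, using a hypothetical helper that is not part of this patch:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical helper (not in this patch): map a real value onto the
// signed 8-bit quantized representation used by the test vectors,
// where real = scale * (q - zeroPoint).
int8_t Quantize(float value, float scale, int32_t zeroPoint)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
    return static_cast<int8_t>(std::min(127, std::max(-128, q)));
}

// Example: Quantize(0.703125f, 0.0078125f, 0) == 90, the first entry of inputValue below.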
diff --git a/test/1.3/QLstm.cpp b/test/1.3/QLstm.cpp
new file mode 100644
index 00000000..b4308d2c
--- /dev/null
+++ b/test/1.3/QLstm.cpp
@@ -0,0 +1,879 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../TestTensor.hpp"
+
+#include "../1.3/HalPolicy.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/array.hpp>
+#include <boost/test/unit_test.hpp>
+#include <boost/test/data/test_case.hpp>
+#include <boost/math/special_functions/relative_difference.hpp>
+
+BOOST_AUTO_TEST_SUITE(QLSTMTests)
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+
+using namespace driverTestHelpers;
+using namespace android::hardware;
+
+using HalPolicy = hal_1_3::HalPolicy;
+
+namespace
+{
+
+template<typename T>
+RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
+{
+ DataLocation inputInloc = {};
+ inputInloc.poolIndex = poolIndex;
+ inputInloc.offset = 0;
+ inputInloc.length = value.size() * sizeof(T);
+ RequestArgument inputRequestArgument = {};
+ inputRequestArgument.location = inputInloc;
+ inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
+ return inputRequestArgument;
+}
+
+// Returns true if the relative difference between two float values is less than the given tolerance.
+// This is used because the floating-point comparison tolerance set on each BOOST_AUTO_TEST_CASE does not work here.
+bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f)
+{
+ float rd;
+ if (a == 0.0f)
+ {
+ rd = fabs(b);
+ }
+ else if (b == 0.0f)
+ {
+ rd = fabs(a);
+ }
+ else
+ {
+ rd = boost::math::relative_difference(a, b);
+ }
+ return rd < tolerance;
+}
+
+// Helper function to create an OperandLifeTime::NO_VALUE for testing.
+// To be used on optional input operands that have no values - these are valid and should be tested.
+HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
+{
+ // Only create a NO_VALUE for optional operands that have no elements
+ if (dimensions.size() == 0 || dimensions[0] == 0)
+ {
+ return HalPolicy::OperandLifeTime::NO_VALUE;
+ }
+ return HalPolicy::OperandLifeTime::CONSTANT_COPY;
+}
+
+void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ const V1_0::Request& request)
+{
+ android::sp<V1_3::IPreparedModel> preparedModel = PrepareModel_1_3(model, driver);
+ if (preparedModel.get() != nullptr)
+ {
+ Execute(preparedModel, request);
+ }
+}
+
+#ifndef ARMCOMPUTECL_ENABLED
+static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
+#else
+static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
+#endif
+
+// Add our own tests here, since we skip the QLSTM tests that Google supplies (because of non-const weights)
+void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
+ const std::vector<int8_t>& inputValue,
+ const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
+ const std::vector<int8_t>& inputToInputWeightsValue,
+ const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
+ const std::vector<int8_t>& inputToForgetWeightsValue,
+ const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
+ const std::vector<int8_t>& inputToCellWeightsValue,
+ const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
+ const std::vector<int8_t>& inputToOutputWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
+ const std::vector<int8_t>& recurrentToInputWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
+ const std::vector<int8_t>& recurrentToForgetWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
+ const std::vector<int8_t>& recurrentToCellWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
+ const std::vector<int8_t>& recurrentToOutputWeightsValue,
+ const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
+ const std::vector<int16_t>& cellToInputWeightsValue,
+ const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
+ const std::vector<int16_t>& cellToForgetWeightsValue,
+ const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
+ const std::vector<int16_t>& cellToOutputWeightsValue,
+ const hidl_vec<uint32_t>& inputGateBiasDimensions,
+ const std::vector<int32_t>& inputGateBiasValue,
+ const hidl_vec<uint32_t>& forgetGateBiasDimensions,
+ const std::vector<int32_t>& forgetGateBiasValue,
+ const hidl_vec<uint32_t>& cellBiasDimensions,
+ const std::vector<int32_t>& cellBiasValue,
+ const hidl_vec<uint32_t>& outputGateBiasDimensions,
+ const std::vector<int32_t>& outputGateBiasValue,
+ const hidl_vec<uint32_t>& projectionWeightsDimensions,
+ const std::vector<int8_t>& projectionWeightsValue,
+ const hidl_vec<uint32_t>& projectionBiasDimensions,
+ const std::vector<int32_t>& projectionBiasValue,
+ const hidl_vec<uint32_t>& outputPreviousTimeStepInDimensions,
+ const std::vector<int8_t>& outputPreviousTimeStepInValue,
+ const hidl_vec<uint32_t>& cellStatePreviousTimeStepInDimensions,
+ const std::vector<int16_t>& cellStatePreviousTimeStepInValue,
+ const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
+ const std::vector<int16_t>& inputLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
+ const std::vector<int16_t>& forgetLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
+ const std::vector<int16_t>& cellLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
+ const std::vector<int16_t>& outputLayerNormWeightsValue,
+ const float& cellClipValue,
+ const float& projectionClipValue,
+ const float& matMulInputGateValue,
+ const float& matMulForgetGateValue,
+ const float& matMulCellGateValue,
+ const float& matMulOutputGateValue,
+ const int32_t& projInputZeroPointValue,
+ const float& projInputScaleValue,
+ const hidl_vec<uint32_t>& outputStateOutDimensions,
+ const std::vector<int8_t>& outputStateOutValue,
+ const hidl_vec<uint32_t>& cellStateOutDimensions,
+ const std::vector<int16_t>& cellStateOutValue,
+ const hidl_vec<uint32_t>& outputDimensions,
+ const std::vector<int8_t>& outputValue,
+ armnn::Compute compute)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
+ HalPolicy::Model model = {};
+
+ // Scale/Offset quantization info
+ float inputScale = 0.0078125f;
+ int32_t inputOffset = 0;
+
+ int32_t hiddenStateZeroPoint = 0;
+ float hiddenStateScale = 0.007f;
+
+ float outputScale = hiddenStateScale;
+ int32_t outputOffset = hiddenStateZeroPoint;
+
+ float cellStateScale = 3.05176e-05f;
+ float cellWeightsScale = 1.0f;
+ int32_t cellStateOffset = 0;
+
+ float weightsScale = 0.00784314f;
+ int32_t weightsOffset = 0;
+
+ float layerNormScale = 3.05182e-05f;
+ int32_t layerNormOffset = 0;
+
+ float biasScale = layerNormScale / 1024;
+ int32_t biasOffset = 0;
+
+ // Inputs:
+ // 00: The input to the LSTM cell. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, inputSize]
+ AddInputOperand<HalPolicy>(model,
+ inputDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ inputScale,
+ inputOffset);
+
+ // 01: The input-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
+ AddTensorOperand<HalPolicy>(model,
+ inputToInputWeightsDimensions,
+ inputToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(inputToInputWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 02: The input-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
+ AddTensorOperand<HalPolicy>(model,
+ inputToForgetWeightsDimensions,
+ inputToForgetWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(inputToForgetWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 03: The input-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
+ AddTensorOperand<HalPolicy>(model,
+ inputToCellWeightsDimensions,
+ inputToCellWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(inputToCellWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 04: The input-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
+ AddTensorOperand<HalPolicy>(model,
+ inputToOutputWeightsDimensions,
+ inputToOutputWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(inputToOutputWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 05: The recurrent-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+ // Shape: [numUnits, outputSize]
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToInputWeightsDimensions,
+ recurrentToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 06: The recurrent-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToForgetWeightsDimensions,
+ recurrentToForgetWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 07: The recurrent-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToCellWeightsDimensions,
+ recurrentToCellWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 08: The recurrent-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToOutputWeightsDimensions,
+ recurrentToOutputWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
+ weightsScale,
+ weightsOffset);
+
+ // 09: The cell-to-input weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+ // Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ cellToInputWeightsDimensions,
+ cellToInputWeightsValue,
+                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(cellToInputWeightsDimensions),
+ cellWeightsScale,
+ weightsOffset);
+
+ // 10: The cell-to-forget weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+ // Shape: [numUnits].
+ AddTensorOperand<HalPolicy>(model,
+ cellToForgetWeightsDimensions,
+ cellToForgetWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(cellToForgetWeightsDimensions),
+ cellWeightsScale,
+ weightsOffset);
+
+ // 11: The cell-to-output weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+ // Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ cellToOutputWeightsDimensions,
+ cellToOutputWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(cellToOutputWeightsDimensions),
+ cellWeightsScale,
+ weightsOffset);
+
+ // 12: The input gate bias. Quantized with scale being the product of input and weights scales
+ // and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ inputGateBiasDimensions,
+ inputGateBiasValue,
+ HalPolicy::OperandType::TENSOR_INT32,
+ CreateNoValueLifeTime(inputGateBiasDimensions),
+ biasScale,
+ biasOffset);
+
+ // 13: The forget gate bias. Quantized with scale being the product of input and weights scales
+ // and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ forgetGateBiasDimensions,
+ forgetGateBiasValue,
+ HalPolicy::OperandType::TENSOR_INT32,
+ CreateNoValueLifeTime(forgetGateBiasDimensions),
+ biasScale,
+ biasOffset);
+
+ // 14: The cell bias. Quantized with scale being the product of input and weights scales and zeroPoint equal to 0.
+ // Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ cellBiasDimensions,
+ cellBiasValue,
+ HalPolicy::OperandType::TENSOR_INT32,
+ CreateNoValueLifeTime(cellBiasDimensions),
+ biasScale,
+ biasOffset);
+
+ // 15: The output gate bias. Quantized with scale being the product of input and weights scales
+ // and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ outputGateBiasDimensions,
+ outputGateBiasValue,
+ HalPolicy::OperandType::TENSOR_INT32,
+ CreateNoValueLifeTime(outputGateBiasDimensions),
+ biasScale,
+ biasOffset);
+
+ // 16: The projection weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [outputSize, numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ projectionWeightsDimensions,
+ projectionWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
+ CreateNoValueLifeTime(projectionWeightsDimensions),
+ 0.00392157f,
+ weightsOffset);
+
+ // 17: The projection bias. Quantized with scale being the product of input and weights scales
+ // and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [outputSize]
+ AddTensorOperand<HalPolicy>(model,
+ projectionBiasDimensions,
+ projectionBiasValue,
+ HalPolicy::OperandType::TENSOR_INT32,
+ CreateNoValueLifeTime(projectionBiasDimensions),
+ 0.0f,
+ biasOffset);
+
+ // 18: The output from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
+ // Shape: [batchSize, outputSize]
+ AddInputOperand<HalPolicy>(model,
+ outputPreviousTimeStepInDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ cellStateScale,
+ inputOffset);
+
+ // 19: The cell state from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+ // Shape: [batchSize, numUnits]
+ AddInputOperand<HalPolicy>(model,
+ cellStatePreviousTimeStepInDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ cellStateScale,
+ cellStateOffset);
+
+    // If any of the tensors has a value, all normalization tensors are set
+ if (!inputLayerNormWeightsValue.empty() ||
+ !forgetLayerNormWeightsValue.empty() ||
+ !cellLayerNormWeightsValue.empty() ||
+ !outputLayerNormWeightsValue.empty())
+ {
+ // Normalization:
+ // 20: The input layer normalization weights. Used to rescale normalized inputs to activation at input gate.
+ // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ inputLayerNormWeightsDimensions,
+ inputLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(inputLayerNormWeightsDimensions),
+ layerNormScale,
+ layerNormOffset);
+
+ // 21: The forget layer normalization weights. Used to rescale normalized inputs to activation at forget gate.
+ // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ forgetLayerNormWeightsDimensions,
+ forgetLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(forgetLayerNormWeightsDimensions),
+ layerNormScale,
+ layerNormOffset);
+
+ // 22: The cell layer normalization weights. Used to rescale normalized inputs to activation at cell gate.
+ // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ cellLayerNormWeightsDimensions,
+ cellLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(cellLayerNormWeightsDimensions),
+ layerNormScale,
+ layerNormOffset);
+
+ // 23: The output layer normalization weights. Used to rescale normalized inputs to activation at output gate.
+ // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
+ AddTensorOperand<HalPolicy>(model,
+ outputLayerNormWeightsDimensions,
+ outputLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ CreateNoValueLifeTime(outputLayerNormWeightsDimensions),
+ layerNormScale,
+ layerNormOffset);
+ }
+
+ // Constant scalar values
+ // 24: The cell clip. If provided the cell state is clipped by this value prior to the cell output activation.
+ // Optional. Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, cellClipValue);
+
+ // Constant scalar values
+ // 25: The projection clip. If provided and projection is enabled, this is used for clipping the projected values.
+ // Optional. Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, projectionClipValue);
+
+ // Constant scalar values
+ // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
+ // Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, matMulInputGateValue);
+
+ // Constant scalar values
+ // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
+ // Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, matMulForgetGateValue);
+
+ // Constant scalar values
+ // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
+ // Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, matMulCellGateValue);
+
+ // Constant scalar values
+ // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
+ // Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, matMulOutputGateValue);
+
+ // Constant scalar values
+ // 30: The zero point of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_INT32.
+ AddIntOperand<HalPolicy>(model, projInputZeroPointValue);
+
+ // Constant scalar values
+ // 31: The scale of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_FLOAT32.
+ AddFloatOperand<HalPolicy>(model, projInputScaleValue);
+
+ // Outputs:
+ // 0: The output state (out). Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
+ AddOutputOperand<HalPolicy>(model,
+ outputStateOutDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ cellStateScale,
+                                cellStateOffset);
+
+ // 1: The cell state (out). Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [batchSize, numUnits].
+ AddOutputOperand<HalPolicy>(model,
+ cellStateOutDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
+ cellStateScale,
+ cellStateOffset);
+
+ // 2: The output. This is effectively the same as the current "output state (out)" value.
+ // Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
+ AddOutputOperand<HalPolicy>(model,
+ outputDimensions,
+ HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ cellStateScale,
+                                cellStateOffset);
+
+ // make the QUANTIZED_LSTM operation
+ model.main.operations.resize(1);
+ model.main.operations[0].type = HalPolicy::OperationType::QUANTIZED_LSTM;
+
+ model.main.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31};
+ model.main.operations[0].outputs = hidl_vec<uint32_t> {32, 33, 34};
+
+ // define the input values
+ hidl_vec<RequestArgument> inputArguments;
+ inputArguments.resize(3);
+
+ inputArguments[0] = CreateRequestArgument<int8_t>(inputValue, 0);
+ inputArguments[1] = CreateRequestArgument<int8_t>(outputPreviousTimeStepInValue, 1);
+ inputArguments[2] = CreateRequestArgument<int16_t>(cellStatePreviousTimeStepInValue, 2);
+
+ // define the expected output values
+ hidl_vec<RequestArgument> outputArguments;
+ outputArguments.resize(3);
+
+ outputArguments[0] = CreateRequestArgument<int8_t>(outputStateOutValue, 3);
+ outputArguments[1] = CreateRequestArgument<int16_t>(cellStateOutValue, 4);
+ outputArguments[2] = CreateRequestArgument<int8_t>(outputValue, 5);
+
+ android::hardware::neuralnetworks::V1_0::Request request = {};
+ request.inputs = inputArguments;
+ request.outputs = outputArguments;
+
+ // set the input data
+ AddPoolAndSetData(inputValue.size(), request, inputValue.data());
+ AddPoolAndSetData(outputPreviousTimeStepInValue.size(), request, outputPreviousTimeStepInValue.data());
+ AddPoolAndSetData(cellStatePreviousTimeStepInValue.size(), request, cellStatePreviousTimeStepInValue.data());
+
+ // add memory for the outputs
+ android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<int8_t>(outputStateOutValue.size(), request);
+ int8_t* outputStateOutData = static_cast<int8_t*>(static_cast<void*>(outputStateOutMemory->getPointer()));
+
+ android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
+ int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
+
+ android::sp<IMemory> outputMemory = AddPoolAndGetData<int8_t>(outputValue.size(), request);
+ int8_t* outputData = static_cast<int8_t*>(static_cast<void*>(outputMemory->getPointer()));
+
+ // make the prepared model and run the execution
+ ExecuteModel(model, *driver, request);
+
+ // check the results
+ for (size_t i = 0; i < outputStateOutValue.size(); ++i)
+ {
+ BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
+ "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+ }
+
+    // Cell state output does not currently match: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM.
+    // Commented out for now:
+    // for (size_t i = 0; i < cellStateOutValue.size(); ++i)
+    // {
+    //     BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
+    //                "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+    // }
+
+ for (size_t i = 0; i < outputValue.size(); ++i)
+ {
+ BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
+ "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ }
+}
+
+void QLstmWithProjection(armnn::Compute compute)
+{
+ // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
+ // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp
+ // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).
+
+ uint32_t batchSize = 2;
+ uint32_t inputSize = 5;
+ uint32_t outputSize = 3;
+ uint32_t numUnits = 4;
+
+ // Inputs:
+ hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
+ std::vector<int8_t> inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
+
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToInputWeightsValue{ 64, 77, 89, -102,
+ -115, 13, 25, 38,
+ -51, 64, -102, 89,
+ -77, 64, -51, -64,
+ -51, -38, -25, -13 };
+
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToForgetWeightsValue{ -77, -13, 38, 25,
+ 115, -64, -25, -51,
+ 38, -102, -51, 38,
+ -64, -51, -77, 38,
+ -51, -77, -64, -64 };
+
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToCellWeightsValue{ -51, -38, -25, -13,
+ -64, 64, -25, -38,
+ -25, -77, 77, -13,
+ -51, -38, -89, 89,
+ -115, -64, 102, 77 };
+
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToOutputWeightsValue{ -102, -51, -25, -115,
+ -13, -89, 38, -38,
+ -102, -25, 77, -25,
+ 51, -89, -38, -64,
+ 13, 64, -77, -51 };
+
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToInputWeightsValue{ -25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77 };
+
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToForgetWeightsValue{ -64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25 };
+
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToCellWeightsValue{ -38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25 };
+
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToOutputWeightsValue{ 38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25 };
+
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
+ std::vector<int16_t> cellToInputWeightsValue;
+
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
+ std::vector<int16_t> cellToForgetWeightsValue;
+
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
+ std::vector<int16_t> cellToOutputWeightsValue;
+
+ hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
+ std::vector<int32_t> inputGateBiasValue{ 644245, 3221226, 4724464, 8160438 };
+
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<int32_t> forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 };
+
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<int32_t> cellBiasValue{-1073742, 15461883, 5368709, 1717987 };
+
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<int32_t> outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 };
+
+ hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
+ std::vector<int8_t> projectionWeightsValue{ -25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51 };
+
+ hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
+ std::vector<int32_t> projectionBiasValue{ 0, 0, 0 };
+
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputStateInValue{ 0, 0, 0, 0, 0, 0 };
+
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<int16_t> cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ // Normalization:
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> inputLayerNormWeightsValue{ 3277, 6553, 9830, 16384 };
+
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 };
+
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 };
+
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 };
+
+ float cellClipValue = 0.0f;
+ float projectionClipValue = 0.0f;
+ float inputIntermediateScale = 0.007059f;
+ float forgetIntermediateScale = 0.007812f;
+ float cellIntermediateScale = 0.007059f;
+ float outputIntermediateScale = 0.007812f;
+ int32_t hiddenStateZeroPoint = 0;
+ float hiddenStateScale = 0.007f;
+
+ // Outputs:
+ hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputStateOutValue{ 127, 127, -108, -67, 127, 127 };
+
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<int16_t> cellStateOutValue { -14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939 };
+
+ hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputValue { 127, 127, -108, -67, 127, 127 };
+
+ QLstmTestImpl(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ cellClipValue,
+ projectionClipValue,
+ inputIntermediateScale,
+ forgetIntermediateScale,
+ cellIntermediateScale,
+ outputIntermediateScale,
+ hiddenStateZeroPoint,
+ hiddenStateScale,
+ outputStateOutDimensions, outputStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ outputDimensions, outputValue,
+ compute);
+}
+
+void QLstmWithNoProjection(armnn::Compute compute)
+{
+ // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
+ // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
+ // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).
+
+ uint32_t batchSize = 2;
+ uint32_t inputSize = 5;
+ uint32_t outputSize = 4;
+ uint32_t numUnits = 4;
+
+ // Inputs:
+ hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
+ std::vector<int8_t> inputValue { 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };
+
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{0, 0};
+ std::vector<int8_t> inputToInputWeightsValue;
+
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToForgetWeightsValue { -77, -13, 38, 25, 115,
+ -64, -25, -51, 38, -102,
+ -51, 38, -64, -51, -77,
+ 38, -51, -77, -64, -64 };
+
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToCellWeightsValue { -51, -38, -25, -13, -64,
+ 64, -25, -38, -25, -77,
+ 77, -13, -51, -38, -89,
+ 89, -115, -64, 102, 77 };
+
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<int8_t> inputToOutputWeightsValue { -102, -51, -25, -115, -13,
+ -89, 38, -38, -102, -25,
+ 77, -25, 51, -89, -38,
+ -64, 13, 64, -77, -51 };
+
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0, 0};
+ std::vector<int8_t> recurrentToInputWeightsValue;
+
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToForgetWeightsValue { -64, -38, -64, -25,
+ 77, 51, 115, 38,
+ -13, 25, 64, 25,
+ 25, 38, -13, 51 };
+
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToCellWeightsValue { -38, 25, 13, -38,
+ 102, -10, -25, 38,
+ 102, -77, -13, 25,
+ 38, -13, 25, 64 };
+
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<int8_t> recurrentToOutputWeightsValue { 38, -13, 13, -25,
+ -64, -89, -25, -77,
+ -13, -51, -89, -25,
+ 13, 64, 25, -38 };
+
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
+ std::vector<int16_t> cellToInputWeightsValue;
+
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
+ std::vector<int16_t> cellToForgetWeightsValue;
+
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
+ std::vector<int16_t> cellToOutputWeightsValue;
+
+ hidl_vec<uint32_t> inputGateBiasDimensions{0};
+ std::vector<int32_t> inputGateBiasValue;
+
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<int32_t> forgetGateBiasValue { 2147484, -6442451, -4294968, 2147484 };
+
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<int32_t> cellBiasValue { -1073742, 15461883, 5368709, 1717987 };
+
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<int32_t> outputGateBiasValue { 1073742, -214748, 4294968, 2147484 };
+
+ hidl_vec<uint32_t> projectionWeightsDimensions{0, 0};
+ std::vector<int8_t> projectionWeightsValue;
+
+ hidl_vec<uint32_t> projectionBiasDimensions{0};
+ std::vector<int32_t> projectionBiasValue;
+
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<int16_t> cellStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ // Normalization:
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
+ std::vector<int16_t> inputLayerNormWeightsValue;
+
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> forgetLayerNormWeightsValue { 6553, 6553, 13107, 9830 };
+
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> cellLayerNormWeightsValue { 22937, 6553, 9830, 26214 };
+
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
+ std::vector<int16_t> outputLayerNormWeightsValue { 19660, 6553, 6553, 16384 };
+
+ float cellClipValue = 0.0f;
+ float projectionClipValue = 0.0f;
+ float inputIntermediateScale = 0.007059f;
+ float forgetIntermediateScale = 0.007812f;
+ float cellIntermediateScale = 0.007059f;
+ float outputIntermediateScale = 0.007812f;
+ int32_t hiddenStateZeroPoint = 0;
+ float hiddenStateScale = 0.007f;
+
+ // Outputs:
+ hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputStateOutValue { -15, 21, 14, 20, -15, 15, 5, 27 };
+
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<int16_t> cellStateOutValue { -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 };
+
+ hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
+ std::vector<int8_t> outputValue { -15, 21, 14, 20, -15, 15, 5, 27 };
+
+ QLstmTestImpl(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ cellClipValue,
+ projectionClipValue,
+ inputIntermediateScale,
+ forgetIntermediateScale,
+ cellIntermediateScale,
+ outputIntermediateScale,
+ hiddenStateZeroPoint,
+ hiddenStateScale,
+ outputStateOutDimensions, outputStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ outputDimensions, outputValue,
+ compute);
+}
+
+} // anonymous namespace
+
+BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
+{
+    // Support for projection is not added yet
+ // QLstmWithProjection(sample);
+}
+
+BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES)
+{
+ QLstmWithNoProjection(sample);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
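
For reference, TolerantCompareEqual above wraps boost::math::relative_difference; a rough standalone equivalent is sketched here (an approximation for illustration, dividing by the larger magnitude, which need not match Boost's exact definition):

#include <algorithm>
#include <cmath>

// Rough stand-in for TolerantCompareEqual: relative difference of two floats,
// taken here against the larger magnitude; a zero operand falls back to the
// absolute value of the other, as in the test helper above.
bool RoughTolerantCompareEqual(float a, float b, float tolerance = 1.0f)
{
    if (a == 0.0f) { return std::fabs(b) < tolerance; }
    if (b == 0.0f) { return std::fabs(a) < tolerance; }
    return std::fabs(a - b) / std::max(std::fabs(a), std::fabs(b)) < tolerance;
}

With the default tolerance of 1.0f this accepts any pair whose relative difference is below 100%, which is deliberately loose for quantized integer outputs.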
diff --git a/test/Android.mk b/test/Android.mk
index af04c838..9349a368 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -333,4 +333,99 @@ endif
include $(BUILD_EXECUTABLE)
-endif # PLATFORM_VERSION == Q
\ No newline at end of file
+endif # PLATFORM_VERSION == Q
+
+ifeq ($(R_OR_LATER),1)
+# The following target is available starting from Android R
+
+##########################
+# armnn-driver-tests@1.3 #
+##########################
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := armnn-driver-tests@1.3
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_ARM_MODE := arm
+LOCAL_PROPRIETARY_MODULE := true
+
+# Mark source files as dependent on Android.mk
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_C_INCLUDES := \
+ $(OPENCL_HEADER_PATH) \
+ $(NN_HEADER_PATH) \
+ $(ARMNN_HEADER_PATH) \
+ $(ARMNN_DRIVER_HEADER_PATH)
+
+LOCAL_CFLAGS := \
+ -std=$(CPP_VERSION) \
+ -fexceptions \
+ -Werror \
+ -O0 \
+ -UNDEBUG \
+ -DBOOST_NO_AUTO_PTR \
+ -DARMNN_ANDROID_R \
+ -DARMNN_ANDROID_NN_V1_3
+
+LOCAL_SRC_FILES := \
+ 1.0/Convolution2D.cpp \
+ 1.1/Convolution2D.cpp \
+ 1.1/Mean.cpp \
+ 1.1/Transpose.cpp \
+ 1.2/Dilation.cpp \
+ 1.2/Capabilities.cpp \
+ 1.0/Lstm.cpp \
+ 1.1/Lstm.cpp \
+ 1.2/Lstm.cpp \
+ 1.3/QLstm.cpp \
+ Tests.cpp \
+ UtilsTests.cpp \
+ Concurrent.cpp \
+ FullyConnected.cpp \
+ GenericLayerTests.cpp \
+ DriverTestHelpers.cpp \
+ SystemProperties.cpp \
+ Concat.cpp \
+ TestTensor.cpp
+
+LOCAL_STATIC_LIBRARIES := \
+ libneuralnetworks_common \
+ libboost_log \
+ libboost_system \
+ libboost_unit_test_framework \
+ libboost_thread \
+ libboost_filesystem \
+ arm_compute_library
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libarmnn-driver@1.3
+
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ libcutils \
+ libfmq \
+ libhidlbase \
+ libhidltransport \
+ libhidlmemory \
+ liblog \
+ libnativewindow \
+ libtextclassifier_hash \
+ libui \
+ libutils \
+ libsync \
+ android.hardware.neuralnetworks@1.0 \
+ android.hardware.neuralnetworks@1.1 \
+ android.hardware.neuralnetworks@1.2 \
+ android.hardware.neuralnetworks@1.3 \
+ android.hidl.allocator@1.0 \
+ android.hidl.memory@1.0
+
+ifeq ($(ARMNN_INCLUDE_LIBOPENCL),1)
+LOCAL_SHARED_LIBRARIES+= \
+ libOpenCL
+endif
+
+include $(BUILD_EXECUTABLE)
+
+endif # R_OR_LATER
\ No newline at end of file
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 18e33777..c6d44065 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -61,7 +61,7 @@ Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
return Void();
}
-#ifdef ARMNN_ANDROID_NN_V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Return<void> PreparedModelCallback_1_2::notify(V1_0::ErrorStatus status,
const android::sp<V1_0::IPreparedModel>& preparedModel)
@@ -81,6 +81,34 @@ Return<void> PreparedModelCallback_1_2::notify_1_2(V1_0::ErrorStatus status,
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3
+
+Return<void> PreparedModelCallback_1_3::notify(V1_0::ErrorStatus status,
+ const android::sp<V1_0::IPreparedModel>& preparedModel)
+{
+ m_1_0_ErrorStatus = status;
+ m_PreparedModel = preparedModel;
+ return Void();
+}
+
+Return<void> PreparedModelCallback_1_3::notify_1_2(V1_0::ErrorStatus status,
+ const android::sp<V1_2::IPreparedModel>& preparedModel)
+{
+ m_1_0_ErrorStatus = status;
+ m_PreparedModel_1_2 = preparedModel;
+ return Void();
+}
+
+Return<void> PreparedModelCallback_1_3::notify_1_3(V1_3::ErrorStatus status,
+ const android::sp<V1_3::IPreparedModel>& preparedModel)
+{
+ m_1_3_ErrorStatus = status;
+ m_PreparedModel_1_3 = preparedModel;
+ return Void();
+}
+
+#endif
+
// lifted from common/Utils.cpp
hidl_memory allocateSharedMemory(int64_t size)
{
@@ -119,7 +147,7 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& mode
return cb->GetPreparedModel();
}
-#if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2)
+#if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
armnn_driver::ArmnnDriver& driver,
@@ -140,7 +168,7 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& mode
#endif
-#ifdef ARMNN_ANDROID_NN_V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
armnn_driver::ArmnnDriver& driver,
@@ -166,6 +194,37 @@ android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver:
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3
+
+android::sp<V1_3::IPreparedModel> PrepareModelWithStatus_1_3(const armnn_driver::hal_1_3::HalPolicy::Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ V1_3::ErrorStatus& prepareStatus)
+{
+ android::sp<PreparedModelCallback_1_3> cb(new PreparedModelCallback_1_3());
+
+ android::hardware::hidl_vec<android::hardware::hidl_handle> emptyHandle1;
+ android::hardware::hidl_vec<android::hardware::hidl_handle> emptyHandle2;
+ armnn_driver::ArmnnDriver::HidlToken emptyToken;
+
+ driver.prepareModel_1_3(model,
+ V1_1::ExecutionPreference::LOW_POWER,
+ V1_3::Priority::LOW,
+ {},
+ emptyHandle1,
+ emptyHandle2,
+ emptyToken,
+ cb);
+
+ prepareStatus = cb->Get_1_3_ErrorStatus();
+ if (prepareStatus == V1_3::ErrorStatus::NONE)
+ {
+ BOOST_TEST((cb->GetPreparedModel_1_3() != nullptr));
+ }
+ return cb->GetPreparedModel_1_3();
+}
+
+#endif
+
V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request,
V1_0::ErrorStatus expectedStatus)
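
A minimal sketch of how a test exercises the new 1.3 prepare path (names taken from this patch; building the model and request is elided, as done in QLstm.cpp above; includes come from DriverTestHelpers.hpp):

// Sketch only: assumes a HalPolicy::Model and V1_0::Request fully populated
// as in QLstm.cpp.
void PrepareAndRun(const armnn_driver::hal_1_3::HalPolicy::Model& model,
                   const V1_0::Request& request)
{
    auto driver = std::make_unique<armnn_driver::ArmnnDriver>(
        armnn_driver::DriverOptions(armnn::Compute::CpuRef));

    V1_3::ErrorStatus prepareStatus = V1_3::ErrorStatus::NONE;
    android::sp<V1_3::IPreparedModel> preparedModel =
        driverTestHelpers::PrepareModelWithStatus_1_3(model, *driver, prepareStatus);

    if (prepareStatus == V1_3::ErrorStatus::NONE && preparedModel.get() != nullptr)
    {
        driverTestHelpers::Execute(preparedModel, request);
    }
}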
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 66d6ac5c..f8f38545 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -28,6 +28,16 @@ namespace V1_0
std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
} // namespace android::hardware::neuralnetworks::V1_0
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+namespace V1_3
+{
+
+std::ostream& operator<<(std::ostream& os, V1_3::ErrorStatus stat);
+
+} // namespace android::hardware::neuralnetworks::V1_3
+#endif
+
} // namespace android::hardware::neuralnetworks
} // namespace android::hardware
} // namespace android
@@ -37,6 +47,10 @@ namespace driverTestHelpers
std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
+#ifdef ARMNN_ANDROID_NN_V1_3
+std::ostream& operator<<(std::ostream& os, V1_3::ErrorStatus stat);
+#endif
+
struct ExecutionCallback : public V1_0::IExecutionCallback
{
ExecutionCallback() : mNotified(false) {}
@@ -71,7 +85,7 @@ private:
android::sp<V1_0::IPreparedModel> m_PreparedModel;
};
-#ifdef ARMNN_ANDROID_NN_V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
class PreparedModelCallback_1_2 : public V1_2::IPreparedModelCallback
{
@@ -101,6 +115,46 @@ private:
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3
+
+class PreparedModelCallback_1_3 : public V1_3::IPreparedModelCallback
+{
+public:
+ PreparedModelCallback_1_3()
+ : m_1_0_ErrorStatus(V1_0::ErrorStatus::NONE)
+ , m_1_3_ErrorStatus(V1_3::ErrorStatus::NONE)
+ , m_PreparedModel()
+ , m_PreparedModel_1_2()
+ , m_PreparedModel_1_3()
+ { }
+ ~PreparedModelCallback_1_3() override { }
+
+ Return<void> notify(V1_0::ErrorStatus status, const android::sp<V1_0::IPreparedModel>& preparedModel) override;
+
+ Return<void> notify_1_2(V1_0::ErrorStatus status, const android::sp<V1_2::IPreparedModel>& preparedModel) override;
+
+ Return<void> notify_1_3(V1_3::ErrorStatus status, const android::sp<V1_3::IPreparedModel>& preparedModel) override;
+
+ V1_0::ErrorStatus GetErrorStatus() { return m_1_0_ErrorStatus; }
+
+ V1_3::ErrorStatus Get_1_3_ErrorStatus() { return m_1_3_ErrorStatus; }
+
+ android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
+
+ android::sp<V1_2::IPreparedModel> GetPreparedModel_1_2() { return m_PreparedModel_1_2; }
+
+ android::sp<V1_3::IPreparedModel> GetPreparedModel_1_3() { return m_PreparedModel_1_3; }
+
+private:
+ V1_0::ErrorStatus m_1_0_ErrorStatus;
+ V1_3::ErrorStatus m_1_3_ErrorStatus;
+ android::sp<V1_0::IPreparedModel> m_PreparedModel;
+ android::sp<V1_2::IPreparedModel> m_PreparedModel_1_2;
+ android::sp<V1_3::IPreparedModel> m_PreparedModel_1_3;
+};
+
+#endif
+
hidl_memory allocateSharedMemory(int64_t size);
template<typename T>
@@ -142,30 +196,6 @@ void AddOperand(HalModel& model, const HalOperand& op)
}
template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
-void AddIntOperand(HalModel& model, int32_t value, uint32_t numberOfConsumers = 1)
-{
- using HalOperand = typename HalPolicy::Operand;
- using HalOperandType = typename HalPolicy::OperandType;
- using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
-
- DataLocation location = {};
- location.offset = model.operandValues.size();
- location.length = sizeof(int32_t);
-
- HalOperand op = {};
- op.type = HalOperandType::INT32;
- op.dimensions = hidl_vec<uint32_t>{};
- op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
- op.location = location;
- op.numberOfConsumers = numberOfConsumers;
-
- model.operandValues.resize(model.operandValues.size() + location.length);
- *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
-
- AddOperand<HalPolicy>(model, op);
-}
-
-template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
void AddBoolOperand(HalModel& model, bool value, uint32_t numberOfConsumers = 1)
{
using HalOperand = typename HalPolicy::Operand;
@@ -199,80 +229,8 @@ template<>
OperandType TypeToOperandType<int32_t>();
template<typename HalPolicy,
- typename T,
- typename HalModel = typename HalPolicy::Model,
- typename HalOperandType = typename HalPolicy::OperandType,
- typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
-void AddTensorOperand(HalModel& model,
- const hidl_vec<uint32_t>& dimensions,
- const T* values,
- HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
- HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
- double scale = 0.f,
- int offset = 0,
- uint32_t numberOfConsumers = 1)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- uint32_t totalElements = 1;
- for (uint32_t dim : dimensions)
- {
- totalElements *= dim;
- }
-
- DataLocation location = {};
- location.length = totalElements * sizeof(T);
-
- if(operandLifeTime == HalOperandLifeTime::CONSTANT_COPY)
- {
- location.offset = model.operandValues.size();
- }
-
- HalOperand op = {};
- op.type = operandType;
- op.dimensions = dimensions;
- op.scale = scale;
- op.zeroPoint = offset;
- op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
- op.location = location;
- op.numberOfConsumers = numberOfConsumers;
-
- model.operandValues.resize(model.operandValues.size() + location.length);
- for (uint32_t i = 0; i < totalElements; i++)
- {
- *(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
- }
-
- AddOperand<HalPolicy>(model, op);
-}
-
-template<typename HalPolicy,
- typename T,
- typename HalModel = typename HalPolicy::Model,
- typename HalOperandType = typename HalPolicy::OperandType,
- typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
-void AddTensorOperand(HalModel& model,
- const hidl_vec<uint32_t>& dimensions,
- const std::vector<T>& values,
- HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32,
- HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
- double scale = 0.f,
- int offset = 0,
- uint32_t numberOfConsumers = 1)
-{
- AddTensorOperand<HalPolicy, T>(model,
- dimensions,
- values.data(),
- operandType,
- operandLifeTime,
- scale,
- offset,
- numberOfConsumers);
-}
-
-template<typename HalPolicy,
- typename HalModel = typename HalPolicy::Model,
- typename HalOperandType = typename HalPolicy::OperandType>
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType>
void AddInputOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
@@ -298,8 +256,8 @@ void AddInputOperand(HalModel& model,
}
template<typename HalPolicy,
- typename HalModel = typename HalPolicy::Model,
- typename HalOperandType = typename HalPolicy::OperandType>
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType>
void AddOutputOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
@@ -329,7 +287,7 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& mode
V1_0::ErrorStatus& prepareStatus,
V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
-#if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2)
+#if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
armnn_driver::ArmnnDriver& driver,
@@ -346,7 +304,7 @@ android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
return PrepareModelWithStatus(model, driver, prepareStatus);
}
-#ifdef ARMNN_ANDROID_NN_V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
armnn_driver::ArmnnDriver& driver,
@@ -363,6 +321,203 @@ android::sp<V1_2::IPreparedModel> PrepareModel_1_2(const HalModel& model,
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3
+
+template<typename HalPolicy>
+void AddOperand(armnn_driver::hal_1_3::HalPolicy::Model& model,
+ const armnn_driver::hal_1_3::HalPolicy::Operand& op)
+{
+ model.main.operands.resize(model.main.operands.size() + 1);
+ model.main.operands[model.main.operands.size() - 1] = op;
+}
+
+template<typename HalPolicy>
+void AddInputOperand(armnn_driver::hal_1_3::HalPolicy::Model& model,
+ const hidl_vec<uint32_t>& dimensions,
+ armnn_driver::hal_1_3::HalPolicy::OperandType operandType =
+ armnn_driver::hal_1_3::HalPolicy::OperandType::TENSOR_FLOAT32,
+ double scale = 0.f,
+ int offset = 0,
+ uint32_t numberOfConsumers = 1)
+{
+ using HalOperand = typename armnn_driver::hal_1_3::HalPolicy::Operand;
+ using HalOperandLifeTime = typename armnn_driver::hal_1_3::HalPolicy::OperandLifeTime;
+
+ HalOperand op = {};
+ op.type = operandType;
+ op.scale = scale;
+ op.zeroPoint = offset;
+ op.dimensions = dimensions;
+ op.lifetime = HalOperandLifeTime::SUBGRAPH_INPUT;
+ op.numberOfConsumers = numberOfConsumers;
+
+ AddOperand<HalPolicy>(model, op);
+
+ model.main.inputIndexes.resize(model.main.inputIndexes.size() + 1);
+ model.main.inputIndexes[model.main.inputIndexes.size() - 1] = model.main.operands.size() - 1;
+}
+
+template<typename HalPolicy>
+void AddOutputOperand(armnn_driver::hal_1_3::HalPolicy::Model& model,
+ const hidl_vec<uint32_t>& dimensions,
+ armnn_driver::hal_1_3::HalPolicy::OperandType operandType =
+ armnn_driver::hal_1_3::HalPolicy::OperandType::TENSOR_FLOAT32,
+ double scale = 0.f,
+ int offset = 0,
+ uint32_t numberOfConsumers = 0)
+{
+ using HalOperand = typename armnn_driver::hal_1_3::HalPolicy::Operand;
+ using HalOperandLifeTime = typename armnn_driver::hal_1_3::HalPolicy::OperandLifeTime;
+
+ HalOperand op = {};
+ op.type = operandType;
+ op.scale = scale;
+ op.zeroPoint = offset;
+ op.dimensions = dimensions;
+ op.lifetime = HalOperandLifeTime::SUBGRAPH_OUTPUT;
+ op.numberOfConsumers = numberOfConsumers;
+
+ AddOperand<HalPolicy>(model, op);
+
+ model.main.outputIndexes.resize(model.main.outputIndexes.size() + 1);
+ model.main.outputIndexes[model.main.outputIndexes.size() - 1] = model.main.operands.size() - 1;
+}
+
+android::sp<V1_3::IPreparedModel> PrepareModelWithStatus_1_3(const armnn_driver::hal_1_3::HalPolicy::Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ V1_3::ErrorStatus& prepareStatus);
+
+template<typename HalModel>
+android::sp<V1_3::IPreparedModel> PrepareModel_1_3(const HalModel& model,
+ armnn_driver::ArmnnDriver& driver)
+{
+ V1_3::ErrorStatus prepareStatus = V1_3::ErrorStatus::NONE;
+ return PrepareModelWithStatus_1_3(model, driver, prepareStatus);
+}
+
+#endif
+
+template<typename HalPolicy,
+ typename T,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
+void AddTensorOperand(HalModel& model,
+ const hidl_vec<uint32_t>& dimensions,
+ const T* values,
+ HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
+ HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
+ double scale = 0.f,
+ int offset = 0,
+ uint32_t numberOfConsumers = 1)
+{
+ using HalOperand = typename HalPolicy::Operand;
+
+ uint32_t totalElements = 1;
+ for (uint32_t dim : dimensions)
+ {
+ totalElements *= dim;
+ }
+
+ DataLocation location = {};
+ location.length = totalElements * sizeof(T);
+
+ if(operandLifeTime == HalOperandLifeTime::CONSTANT_COPY)
+ {
+ location.offset = model.operandValues.size();
+ }
+
+ HalOperand op = {};
+ op.type = operandType;
+ op.dimensions = dimensions;
+ op.scale = scale;
+ op.zeroPoint = offset;
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+ op.numberOfConsumers = numberOfConsumers;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ for (uint32_t i = 0; i < totalElements; i++)
+ {
+ *(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
+ }
+
+ AddOperand<HalPolicy>(model, op);
+}
+
+template<typename HalPolicy,
+ typename T,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
+void AddTensorOperand(HalModel& model,
+ const hidl_vec<uint32_t>& dimensions,
+ const std::vector<T>& values,
+ HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32,
+ HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
+ double scale = 0.f,
+ int offset = 0,
+ uint32_t numberOfConsumers = 1)
+{
+ AddTensorOperand<HalPolicy, T>(model,
+ dimensions,
+ values.data(),
+ operandType,
+ operandLifeTime,
+ scale,
+ offset,
+ numberOfConsumers);
+}
+
+template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
+void AddIntOperand(HalModel& model, int32_t value, uint32_t numberOfConsumers = 1)
+{
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
+ DataLocation location = {};
+ location.offset = model.operandValues.size();
+ location.length = sizeof(int32_t);
+
+ HalOperand op = {};
+ op.type = HalOperandType::INT32;
+ op.dimensions = hidl_vec<uint32_t>{};
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+ op.numberOfConsumers = numberOfConsumers;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
+
+ AddOperand<HalPolicy>(model, op);
+}
+
+template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
+void AddFloatOperand(HalModel& model,
+ float value,
+ uint32_t numberOfConsumers = 1)
+{
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
+ DataLocation location = {};
+ location.offset = model.operandValues.size();
+ location.length = sizeof(float);
+
+ HalOperand op = {};
+ op.type = HalOperandType::FLOAT32;
+ op.dimensions = hidl_vec<uint32_t>{};
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+ op.numberOfConsumers = numberOfConsumers;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ *reinterpret_cast<float*>(&model.operandValues[location.offset]) = value;
+
+ AddOperand<HalPolicy>(model, op);
+}
V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request,
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index 3ac095dc..f9f1a761 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -76,7 +76,7 @@ void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, cons
}
}
-#ifdef ARMNN_ANDROID_NN_V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,