aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp  23
1 files changed, 8 insertions, 15 deletions
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index 404a412ca0..c68051c8ca 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -24,14 +24,12 @@
namespace
{
-using MultiArray = const boost::multi_array<uint8_t, 2>&;
-
-armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
- MultiArray expectedOutput)
+armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
+ armnn::TensorShape& outputExpectedShape)
{
- auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
+ auto batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ auto inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ auto outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
float inputOutputScale = 0.0078125f;
int32_t inputOutputOffset = 128;
@@ -182,26 +180,21 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
std::vector<uint8_t> inputVector = {166, 179, 50, 150};
armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);
std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
- boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);
std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);
std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
- boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);
std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
// Builds up the structure of the network
- armnn::INetworkPtr net = CreateQuantizedLstmNetwork(input, outputStateOut);
+ armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());
BOOST_TEST_CHECKPOINT("create a network");
@@ -227,8 +220,8 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
outputTensors.reserve(2);
//output
- std::vector<int16_t > cellStateOutResult(cellStateOutVector.size());
- std::vector<uint8_t > outputStateOutResult(outputStateOutVector.size());
+ std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
+ std::vector<uint8_t> outputStateOutResult(outputStateOutVector.size());
outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())});
outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())});