From 483c811ea6fd0e7801aac1afd979ed02a649064b Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Tue, 1 Jun 2021 09:24:52 +0100 Subject: IVGCVSW-5962 Remove boost::multi_array * Replaced all instances of boost::multi_array with flat vectors. * Updated LayerTestResult struct with new member variables. * Updated CompareTensor function to compare flat vectors and the shape. * Removed MakeTensor function from TensorHelpers.hpp. * Removed GetTensorShapeAsArray function from LayerTestResult.hpp. * Removed boost::array usage. * Removed boost::extents usages. * Removed boost::random usages. Signed-off-by: Matthew Sloyan Signed-off-by: Sadik Armagan Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743 --- .../test/layerTests/LstmTestImpl.cpp | 1710 +++++++++----------- 1 file changed, 803 insertions(+), 907 deletions(-) (limited to 'src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp') diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp index 1c63542dcb..11003a2e97 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp @@ -20,18 +20,17 @@ #include -#include - namespace { template> void LstmUtilsVectorBatchVectorAddTestImpl( - boost::multi_array& vec, - boost::multi_array& batchVec, + std::vector& vec, + std::vector& batchVec, uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput ) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -45,19 +44,20 @@ void LstmUtilsVectorBatchVectorAddTestImpl( VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - auto result = CompareTensors(batchVec, expectedOutput); + auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); - BOOST_TEST(batchVec[0][0] == 1.0f); + BOOST_TEST(batchVec[0] == 1.0f); } template> void LstmUtilsZeroVectorTestImpl( - boost::multi_array& input, + std::vector& input, uint32_t vSize, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -71,7 +71,7 @@ void LstmUtilsZeroVectorTestImpl( ZeroVector(*outputEncoder, vSize); // check shape and compare values - auto result = CompareTensors(input, expectedOutput); + auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position @@ -82,10 +82,11 @@ void LstmUtilsZeroVectorTestImpl( template> void LstmUtilsMeanStddevNormalizationTestImpl( - boost::multi_array& input, + std::vector& input, uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -98,21 +99,22 @@ void LstmUtilsMeanStddevNormalizationTestImpl( MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f); // check shape and compare values - auto result = CompareTensors(input, expectedOutput); + auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position outputEncoder->Set(1.0f); - 
BOOST_TEST(input[0][0] == 1.0f); + BOOST_TEST(input[0] == 1.0f); } template> void LstmUtilsVectorBatchVectorCwiseProductTestImpl( - boost::multi_array& vec, - boost::multi_array& batchVec, + std::vector& vec, + std::vector& batchVec, uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -126,12 +128,12 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl( VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - auto result = CompareTensors(batchVec, expectedOutput); + auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); - BOOST_TEST(batchVec[0][0] == 1.0f); + BOOST_TEST(batchVec[0] == 1.0f); } // Lstm Layer tests: @@ -142,16 +144,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) { IgnoreUnused(memoryManager); - unsigned int batchSize = armnn::numeric_cast(input.shape()[0]); - unsigned int inputSize = armnn::numeric_cast(input.shape()[1]); - unsigned int outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + unsigned int batchSize = armnn::numeric_cast(inputShape[0]); + unsigned int inputSize = armnn::numeric_cast(inputShape[1]); + unsigned int outputSize = armnn::numeric_cast(outputExpectedShape[1]); // cellSize and outputSize have the same size when there is no projection. 
unsigned numUnits = outputSize; @@ -164,30 +168,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - std::vector inputVector; inputVector.assign(input.data(), input.data() + (batchSize * inputSize)); - auto inputTensor = MakeTensor(inputTensorInfo, inputVector); std::vector cellStateInVector(batchSize * numUnits, T()); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, T()); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -219,59 +212,59 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset); - auto inputToInputWeights = MakeTensor(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f, - -0.34550029f, 0.04266912f, -0.15680569f, - -0.34856534f, 0.43890524f}); + std::vector inputToInputWeights = {-0.45018822f, -0.02338299f, -0.0870589f, + -0.34550029f, 0.04266912f, -0.15680569f, + -0.34856534f, 0.43890524f}; - auto inputToForgetWeights = MakeTensor(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f, - -0.31343272f, -0.40032279f, 0.44781327f, - 0.01387155f, -0.35593212f}); + std::vector inputToForgetWeights = { 0.09701663f, 0.20334584f, -0.50592935f, + -0.31343272f, -0.40032279f, 0.44781327f, + 0.01387155f, -0.35593212f}; - auto inputToCellWeights = MakeTensor(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, - -0.20583314f, 0.44344562f, 0.22077113f, - -0.29909778f}); + std::vector inputToCellWeights = { -0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, + -0.20583314f, 0.44344562f, 0.22077113f, + -0.29909778f}; - auto inputToOutputWeights = MakeTensor(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f, - 0.40525138f, 0.44272184f, 0.03897077f, - -0.1556896f, 0.19487578f}); + std::vector inputToOutputWeights = { -0.25065863f, -0.28290087f, 0.04613829f, + 0.40525138f, 0.44272184f, 0.03897077f, + -0.1556896f, 0.19487578f}; - auto recurrentToInputWeights = MakeTensor(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f, - -0.35746509f, 0.28902304f, 0.08183324f, - -0.16555229f, 0.02286911f, -0.13566875f, - 0.03034258f, 0.48091322f, -0.12528998f, - 0.24077177f, -0.51332325f, -0.33502164f, - 0.10629296f}); + std::vector recurrentToInputWeights = {-0.0063535f, -0.2042388f, 0.31454784f, + -0.35746509f, 0.28902304f, 0.08183324f, + -0.16555229f, 0.02286911f, -0.13566875f, + 
0.03034258f, 0.48091322f, -0.12528998f, + 0.24077177f, -0.51332325f, -0.33502164f, + 0.10629296f}; - auto recurrentToForgetWeights = MakeTensor(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f, - 0.2112639f, 0.27654213f, 0.20864892f, - -0.07646349f, 0.45877004f, 0.00141793f, - -0.14609534f, 0.36447752f, 0.09196436f, - 0.28053468f, 0.01560611f, -0.20127171f, - -0.01140004f}); + std::vector recurrentToForgetWeights = { -0.48684245f, -0.06655136f, 0.42224967f, + 0.2112639f, 0.27654213f, 0.20864892f, + -0.07646349f, 0.45877004f, 0.00141793f, + -0.14609534f, 0.36447752f, 0.09196436f, + 0.28053468f, 0.01560611f, -0.20127171f, + -0.01140004f}; - auto recurrentToCellWeights = MakeTensor(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f, - 0.26320225f, 0.05695659f, -0.00123841f, - -0.4744786f, -0.35869038f, -0.06418842f, - -0.13502428f, -0.501764f, 0.22830659f, - -0.46367589f, 0.26016325f, -0.03894562f, - -0.16368064f}); + std::vector recurrentToCellWeights = { -0.3407414f, 0.24443203f, -0.2078532f, + 0.26320225f, 0.05695659f, -0.00123841f, + -0.4744786f, -0.35869038f, -0.06418842f, + -0.13502428f, -0.501764f, 0.22830659f, + -0.46367589f, 0.26016325f, -0.03894562f, + -0.16368064f}; - auto recurrentToOutputWeights = MakeTensor(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f, - 0.09215671f, 0.24107647f, -0.39835793f, - 0.18212086f, 0.01301402f, 0.48572797f, - -0.50656658f, 0.20047462f, -0.20607421f, - -0.51818722f, -0.15390486f, 0.0468148f, - 0.39922136f}); + std::vector recurrentToOutputWeights = { 0.43385774f, -0.17194885f, 0.2718237f, + 0.09215671f, 0.24107647f, -0.39835793f, + 0.18212086f, 0.01301402f, 0.48572797f, + -0.50656658f, 0.20047462f, -0.20607421f, + -0.51818722f, -0.15390486f, 0.0468148f, + 0.39922136f}; - auto cellToInputWeights = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector cellToInputWeights = {0., 0., 0., 0.}; - auto inputGateBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector inputGateBias = {0., 0., 0., 0.}; - auto forgetGateBias = MakeTensor(tensorInfo4, {1., 1., 1., 1.}); + std::vector forgetGateBias = {1., 1., 1., 1.}; - auto cellBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector cellBias = {0., 0., 0., 0.}; - auto outputGateBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector outputGateBias = {0., 0., 0., 0.}; armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8); @@ -287,19 +280,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4); armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - 
AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); data.m_InputToInputWeights = &inputToInputWeightsTensor; data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -330,15 +323,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -346,8 +342,8 @@ LayerTestResult LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) @@ -368,30 +364,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - std::vector inputVector; inputVector.assign(input.data(), input.data() + (batchSize * inputSize)); - auto inputTensor = MakeTensor(inputTensorInfo, inputVector); std::vector cellStateInVector(batchSize * numUnits, T()); 
- auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, T()); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -425,135 +410,118 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset); - auto inputToInputWeights = - MakeTensor(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f, - 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f, - -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f, - -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f, - -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f, - -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f, - -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f, - 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f, - 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f, - 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f, - -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f, - 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f, - -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f, - -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f, - -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f, - 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f, - -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f, - -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f, - -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f, - -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f - }); - - auto inputToForgetWeights = - MakeTensor(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f, - -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f, - -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f, - 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f, - 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f, - -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f, - -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f, - 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f, - 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f, - 0.12784666f, 0.07077897f, 
0.025725935f, 0.04165009f,0.07241905f, - 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f, - -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f, - 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f, - -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f, - -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f, - 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f, - 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f, - 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f, - -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f, - 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f - }); - - auto inputToCellWeights = - MakeTensor(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f, - -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f, - -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f, - -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f, - -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f, - 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f, - -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f, - 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f, - -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f, - -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f, - -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f, - 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f, - 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f, - 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f, - -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f, - -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f, - -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f, - -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f, - -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f, - -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f, - 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f, - 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f, - 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f, - 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f, - 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f - }); - - auto inputToOutputWeights = - MakeTensor(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f, - -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f, - 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f, - -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f, - -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f, - 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f, - -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f, - -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f, - -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f, - -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f, - 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f, - 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f, - 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f, - -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f, - 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f, - 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f, - -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f, - 0.08949725f, 
0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f, - -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f, - -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f - }); - - auto inputGateBias = - MakeTensor(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f, - -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f, - -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f, - 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f - }); - - auto forgetGateBias = - MakeTensor(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, - 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f, - 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f, - -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f, - 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f - }); - - auto cellBias = - MakeTensor(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, - -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f, - -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f, - -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f, - 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f - }); - - auto outputGateBias = - MakeTensor(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f, - 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f, - 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f, - -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f - }); - - auto recurrentToInputWeights = - MakeTensor(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, + std::vector inputToInputWeights = {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f, + 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f, + -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f, + -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f, + -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f, + -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f, + -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f, + 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f, + 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f, + 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f, + -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f, + 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f, + -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f, + -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f, + -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f, + 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f, + -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f, + -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f, + -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f, + -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f }; + + std::vector inputToForgetWeights = {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f, + -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f, + -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f, + 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f, + 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f, + -0.12257899f, 
-0.033945758f,-0.031303465f, 0.045630626f,0.06843887f, + -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f, + 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f, + 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f, + 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f, + 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f, + -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f, + 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f, + -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f, + -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f, + 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f, + 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f, + 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f, + -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f, + 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f }; + + std::vector inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f, + -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f, + -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f, + -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f, + -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f, + 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f, + -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f, + 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f, + -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f, + -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f, + -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f, + 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f, + 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f, + 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f, + -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f, + -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f, + -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f, + -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f, + -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f, + -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f, + 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f, + 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f, + 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f, + 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f, + 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f }; + + std::vector inputToOutputWeights ={-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f, + -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f, + 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f, + -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f, + -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f, + 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f, + -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f, + -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f, + -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f, + -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f, + 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f, + 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f, + 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f, + -0.078907564f,-0.06707616f, -0.11844508f, 
-0.09986688f,-0.07509403f, + 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f, + 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f, + -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f, + 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f, + -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f, + -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f }; + + std::vector inputGateBias = {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f, + -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f, + -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f, + 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f }; + + std::vector forgetGateBias ={0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, + 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f, + 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f, + -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f, + 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f }; + + std::vector cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, + -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f, + -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f, + -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f, + 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f }; + + std::vector outputGateBias ={0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f, + 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f, + 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f, + -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f}; + + std::vector recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f, -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f, -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f, @@ -632,11 +600,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f, -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f, - -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f - }); + -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f }; - auto recurrentToForgetWeights = - MakeTensor(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, + std::vector recurrentToForgetWeights = {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f, -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f, 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f, @@ -715,11 +681,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f, 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f, - -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f - }); + -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f }; - auto recurrentToCellWeights = - MakeTensor(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f, + std::vector recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f, 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f, 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f, -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f, @@ 
-798,12 +762,10 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f, -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f, -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f, - -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f - }); + -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f }; - auto recurrentToOutputWeights = - MakeTensor(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f, - -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f, + std::vector recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f, + -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f, @@ -879,101 +841,90 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f, - 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f - }); - - auto cellToInputWeights = - MakeTensor(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f, - -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f, - -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f, - 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f - }); - - - auto cellToForgetWeights = - MakeTensor(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f, - -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f, - -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f, - 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f - }); - - auto cellToOutputWeights = - MakeTensor(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f, - -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f, - -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f, - 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f - }); - - auto projectionWeights = - MakeTensor(tensorInfo16x20, - {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f, - 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f, - -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f, - -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f, - 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f, - 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f, - 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f, - 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f, - -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f, - -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f, - -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f, - 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f, - 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f, - 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f, - 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f, - 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f, - -0.029149994f, 
0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f, - 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f, - -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f, - 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f, - -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f, - -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f, - 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f, - -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f, - 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f, - -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f, - -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f, - 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f, - -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f, - -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f, - -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f, - 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f, - 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f, - -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f, - 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f, - 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f, - 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f, - 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f, - 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f, - -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f, - -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f, - 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f, - -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f, - -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f, - 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f, - 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f, - 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f, - -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f, - -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f, - -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f, - 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f, - -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f, - 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f, - 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f, - -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f, - -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f, - -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f, - 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f, - -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f, - -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f, - -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f, - 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f, - 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f, - 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f - }); + 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f}; + + std::vector cellToInputWeights = {0.040369894f, 0.030746894f, 0.24704495f, 
0.018586371f, -0.037586458f, + -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f, + -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f, + 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f}; + + + std::vector cellToForgetWeights = {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f, + -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f, + -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f, + 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f}; + + std::vector cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f, + -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f, + -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f, + 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f}; + + std::vector projectionWeights={-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f, + 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f, + -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f, + -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f, + 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f, + 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f, + 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f, + 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f, + -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f, + -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f, + -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f, + 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f, + 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f, + 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f, + 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f, + 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f, + -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f, + 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f, + -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f, + 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f, + -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f, + -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f, + 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f, + -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f, + 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f, + -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f, + -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f, + 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f, + -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f, + -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f, + -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f, + 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f, + 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f, + -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f, + 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f, + 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f, + 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f, + 
0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f, + 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f, + -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f, + -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f, + 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f, + -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f, + -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f, + 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f, + 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f, + 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f, + -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f, + -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f, + -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f, + 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f, + -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f, + 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f, + 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f, + -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f, + -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f, + -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f, + 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f, + -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f, + -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f, + -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f, + 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f, + 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f, + 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f}; std::vector projectionBiasVector(outputSize, 0.f); - auto projectionBias = MakeTensor(tensorInfo16, projectionBiasVector); armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5); @@ -993,23 +944,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20); armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]); - AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data()); + AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data()); data.m_InputToInputWeights = &inputToInputWeightsTensor; data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1035,7 +986,6 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl data.m_Parameters.m_PeepholeEnabled = true; data.m_Parameters.m_ProjectionEnabled = true; - std::unique_ptr workload = workloadFactory.CreateLstm(data, info); inputHandle->Allocate(); outputStateInHandle->Allocate(); @@ -1046,16 +996,18 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); - - return ret; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -1063,8 +1015,10 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& 
memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) @@ -1074,10 +1028,10 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( bool peepholeEnabled = true; bool projectionEnabled = false; // These are not the input and the output of Lstm yet - unsigned int batchSize = armnn::numeric_cast(input.shape()[0]); - unsigned int inputSize = armnn::numeric_cast(input.shape()[1]); + unsigned int batchSize = armnn::numeric_cast(inputShape[0]); + unsigned int inputSize = armnn::numeric_cast(inputShape[1]); - unsigned int outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + unsigned int outputSize = armnn::numeric_cast(outputExpectedShape[1]); const unsigned int cellSize = outputSize; @@ -1095,14 +1049,10 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( // List of inputs std::vector inputData; inputData.assign(input.data(), input.data() + batchSize*inputSize); - auto inputTensor = MakeTensor(inputTensorInfo, inputData); std::vector outputStateInVector(batchSize * outputSize, 0.f); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); std::vector cellStateInVector(batchSize * cellSize, 0.f); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - // Prepare all the weights in the descriptor for LSTM armnn::LstmQueueDescriptor data; @@ -1110,41 +1060,51 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset); - auto inputToCellWeights = MakeTensor(tensorInfoInput, - {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f, - 0.04717243f, 0.48944736f, -0.38535351f, - -0.17212132f}); - auto inputToForgetWeights = MakeTensor(tensorInfoInput, - {-0.55291498f, -0.42866567f, 0.13056988f, - -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f, - 0.33826375f}); - auto inputToOutputWeights = MakeTensor(tensorInfoInput, - {0.10725588f, -0.02335852f, -0.55932593f, - -0.09426838f, -0.44257352f, 0.54939759f, - 0.01533556f, 0.42751634f}); - auto cellBias = MakeTensor(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f}); - auto forgetGateBias = MakeTensor(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f}); - auto outputGateBias = MakeTensor(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f}); - - auto recurrentToCellWeights = MakeTensor(tensorInfoOutput, - {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f, - 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f, - 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f, - 0.21193194f}); - auto recurrentToForgetWeights = MakeTensor(tensorInfoOutput, - {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f, - 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f, - -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f}); - - auto recurrentToOutputWeights = MakeTensor(tensorInfoOutput, - {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f, - -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f, - 0.50248802f, 0.26114327f, 
-0.43736315f, 0.33149987f}); - - auto cellToForgetWeights = MakeTensor(tensorInfoNumUnits, - {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f}); - auto cellToOutputWeights = MakeTensor(tensorInfoNumUnits, - {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f}); + std::vector inputToCellWeights = + { + -0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f, + 0.04717243f, 0.48944736f, -0.38535351f, + -0.17212132f + }; + std::vector inputToForgetWeights = + { + -0.55291498f, -0.42866567f, 0.13056988f, + -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f, + 0.33826375f + }; + std::vector inputToOutputWeights = + { + 0.10725588f, -0.02335852f, -0.55932593f, + -0.09426838f, -0.44257352f, 0.54939759f, + 0.01533556f, 0.42751634f + }; + std::vector cellBias = {0.f, 0.f, 0.f, 0.f}; + std::vector forgetGateBias = {1.f, 1.f, 1.f, 1.f}; + std::vector outputGateBias = {0.f, 0.f, 0.f, 0.f}; + + std::vector recurrentToCellWeights = + { + 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f, + 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f, + 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f, + 0.21193194f + }; + std::vector recurrentToForgetWeights = + { + -0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f, + 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f, + -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f + }; + + std::vector recurrentToOutputWeights = + { + 0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f, + -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f, + 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f + }; + + std::vector cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f}; + std::vector cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f}; armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput); @@ -1158,25 +1118,23 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput); armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput); - armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits); armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + 
AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data()); data.m_InputToCellWeights = &inputToCellWeightsTensor; data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1202,29 +1160,28 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( data.m_Parameters.m_ClippingThresProj = 0.0; data.m_Parameters.m_ClippingThresCell = 0.0; - // List of outputs std::vector scratchBufferVector(batchSize * scratchBufferSize, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); LayerTestResult ret0(scratchBufferTensorInfo); // Output state for a certain time step std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); LayerTestResult ret1(outputStateOutTensorInfo); // Cell state for a certain time step std::vector cellStateOutVector(batchSize * cellSize, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); LayerTestResult ret2(cellStateOutTensorInfo); // Output for a certain time step - std::vector outputVector(batchSize * outputSize, T()); - auto outputTensor = MakeTensor(outputTensorInfo, outputVector); std::vector outputData; outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize); LayerTestResult ret3(outputTensorInfo); - ret3.outputExpected = MakeTensor(outputTensorInfo, outputData); + ret3.m_ExpectedData = outputData; + + std::vector actualScratchBufferOutput(scratchBufferTensorInfo.GetNumElements()); + std::vector actualOutputStateOutput(outputStateOutTensorInfo.GetNumElements()); + std::vector actualCellStateOutput(cellStateOutTensorInfo.GetNumElements()); + std::vector actualOutput(outputTensorInfo.GetNumElements()); // Prepare the inputs and outputs for the workload std::unique_ptr inputHandle = @@ -1255,7 +1212,6 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( std::unique_ptr workload = workloadFactory.CreateLstm(data, info); - inputHandle->Allocate(); outputStateInHandle->Allocate(); cellStateInHandle->Allocate(); @@ -1265,21 +1221,25 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); - - 
CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]); - CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]); - CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]); + CopyDataToITensorHandle(scratchBufferHandle.get(), scratchBufferVector.data()); + CopyDataToITensorHandle(outputStateOutHandle.get(), outputStateOutVector.data()); + CopyDataToITensorHandle(cellStateOutHandle.get(), cellStateOutVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get()); - CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get()); - CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get()); - CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualScratchBufferOutput.data(), scratchBufferHandle.get()); + CopyDataFromITensorHandle(actualOutputStateOutput.data(), outputStateOutHandle.get()); + CopyDataFromITensorHandle(actualCellStateOutput.data(), cellStateOutHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + ret0.m_ActualData = actualScratchBufferOutput; + ret1.m_ActualData = actualOutputStateOutput; + ret2.m_ActualData = actualCellStateOutput; + ret3.m_ActualData = actualOutput; return ret3; } @@ -1289,8 +1249,8 @@ LayerTestResult LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) @@ -1311,30 +1271,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - std::vector inputVector; inputVector.assign(input.data(), input.data() + (batchSize * inputSize)); - auto inputTensor = MakeTensor(inputTensorInfo, inputVector); std::vector cellStateInVector(batchSize * numUnits, 0.f); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, 0.f); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, 0.f); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, 0.f); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, 0.f); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -1368,95 +1317,73 @@ 
LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset); - auto inputToInputWeights = - MakeTensor(tensorInfo4x5, { 0.5f, 0.6f, 0.7f, -0.8f, -0.9f, - 0.1f, 0.2f, 0.3f, -0.4f, 0.5f, - -0.8f, 0.7f, -0.6f, 0.5f, -0.4f, - -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize} + std::vector inputToInputWeights = {0.5f, 0.6f, 0.7f, -0.8f, -0.9f, + 0.1f, 0.2f, 0.3f, -0.4f, 0.5f, + -0.8f, 0.7f, -0.6f, 0.5f, -0.4f, + -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}; //{numUnits, inputSize} - auto inputToForgetWeights = - MakeTensor(tensorInfo4x5, {-0.6f, -0.1f, 0.3f, 0.2f, 0.9f, - -0.5f, -0.2f, -0.4f, 0.3f, -0.8f, - -0.4f, 0.3f, -0.5f, -0.4f, -0.6f, - 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize} + std::vector inputToForgetWeights = { -0.6f, -0.1f, 0.3f, 0.2f, 0.9f, + -0.5f, -0.2f, -0.4f, 0.3f, -0.8f, + -0.4f, 0.3f, -0.5f, -0.4f, -0.6f, + 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}; //{numUnits, inputSize} - auto inputToCellWeights = - MakeTensor(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f, - 0.5f, -0.2f, -0.3f, -0.2f, -0.6f, - 0.6f, -0.1f, -0.4f, -0.3f, -0.7f, - 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}); //{numUnits, inputSize} + std::vector inputToCellWeights = {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f, + 0.5f, -0.2f, -0.3f, -0.2f, -0.6f, + 0.6f, -0.1f, -0.4f, -0.3f, -0.7f, + 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}; //{numUnits, inputSize} - auto inputToOutputWeights = - MakeTensor(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f, - -0.7f, 0.3f, -0.3f, -0.8f, -0.2f, - 0.6f, -0.2f, 0.4f, -0.7f, -0.3f, - -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}); //{numUnits, inputSize} + std::vector inputToOutputWeights = {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f, + -0.7f, 0.3f, -0.3f, -0.8f, -0.2f, + 0.6f, -0.2f, 0.4f, -0.7f, -0.3f, + -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}; //{numUnits, inputSize} - auto inputGateBias = - MakeTensor(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits} + std::vector inputGateBias = {0.03f, 0.15f, 0.22f, 0.38f}; //{numUnits} - auto forgetGateBias = - MakeTensor(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits} + std::vector forgetGateBias = {0.1f, -0.3f, -0.2f, 0.1f}; //{numUnits} - auto cellBias = - MakeTensor(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits} + std::vector cellBias = {-0.05f, 0.72f, 0.25f, 0.08f}; //{numUnits} - auto outputGateBias = - MakeTensor(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits} + std::vector outputGateBias = {0.05f, -0.01f, 0.2f, 0.1f}; //{numUnits} - auto recurrentToInputWeights = - MakeTensor(tensorInfo4x3, {-0.2f, -0.3f, 0.4f, + std::vector recurrentToInputWeights ={-0.2f, -0.3f, 0.4f, 0.1f, -0.5f, 0.9f, -0.2f, -0.3f, -0.7f, - 0.05f, -0.2f, -0.6f}); //{numUnits, outputSize} + 0.05f, -0.2f, -0.6f}; //{numUnits, outputSize} - auto recurrentToCellWeights = - MakeTensor(tensorInfo4x3, {-0.3f, 0.2f, 0.1f, + std::vector recurrentToCellWeights = {-0.3f, 0.2f, 0.1f, -0.3f, 0.8f, -0.08f, -0.2f, 0.3f, 0.8f, - -0.6f, -0.1f, 0.2f}); //{numUnits, outputSize} + -0.6f, -0.1f, 0.2f}; //{numUnits, outputSize} - auto recurrentToForgetWeights = - MakeTensor(tensorInfo4x3, {-0.5f, -0.3f, -0.5f, - -0.2f, 0.6f, 0.4f, - 0.9f, 0.3f, -0.1f, - 0.2f, 0.5f, 0.2f}); //{numUnits, outputSize} + std::vector recurrentToForgetWeights = { -0.5f, -0.3f, -0.5f, + -0.2f, 0.6f, 0.4f, + 0.9f, 0.3f, -0.1f, + 0.2f, 0.5f, 0.2f}; //{numUnits, outputSize} - auto recurrentToOutputWeights = - 
MakeTensor(tensorInfo4x3, { 0.3f, -0.1f, 0.1f, - -0.2f, -0.5f, -0.7f, - -0.2f, -0.6f, -0.1f, - -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize} + std::vector recurrentToOutputWeights = { 0.3f, -0.1f, 0.1f, + -0.2f, -0.5f, -0.7f, + -0.2f, -0.6f, -0.1f, + -0.4f, -0.7f, -0.2f}; //{numUnits, outputSize} - auto cellToInputWeights = - MakeTensor(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits} + std::vector cellToInputWeights = {0.05f, 0.1f, 0.25f, 0.15f}; //{numUnits} - auto cellToForgetWeights = - MakeTensor(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits} + std::vector cellToForgetWeights = {-0.02f, -0.15f, -0.25f, -0.03f}; //{numUnits} - auto cellToOutputWeights = - MakeTensor(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits} + std::vector cellToOutputWeights = {0.1f, -0.1f, -0.5f, 0.05f}; //{numUnits} - auto projectionWeights = - MakeTensor(tensorInfo3x4, - {-0.1f, 0.2f, 0.01f, -0.2f, - 0.1f, 0.5f, 0.3f, 0.08f, - 0.07f, 0.2f, -0.4f, 0.2f}); //{outputSize, numUnits} + std::vector projectionWeights = {-0.1f, 0.2f, 0.01f, -0.2f, + 0.1f, 0.5f, 0.3f, 0.08f, + 0.07f, 0.2f, -0.4f, 0.2f}; //{outputSize, numUnits} - std::vector projectionBiasVector(outputSize, 0.f); - auto projectionBias = MakeTensor(tensorInfo3, projectionBiasVector); //{outputSize} + std::vector projectionBiasVector(outputSize, 0.f); //{outputSize} - auto inputLayerNormWeights = - MakeTensor(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits} + std::vector inputLayerNormWeights = {0.1f, 0.2f, 0.3f, 0.5f}; //{numUnits} - auto forgetLayerNormWeights = - MakeTensor(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits} + std::vector forgetLayerNormWeights = {0.2f, 0.2f, 0.4f, 0.3f}; //{numUnits} - auto cellLayerNormWeights = - MakeTensor(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits} + std::vector cellLayerNormWeights = {0.7f, 0.2f, 0.3f, 0.8f}; //{numUnits} - auto outputLayerNormWeights = - MakeTensor(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits} + std::vector outputLayerNormWeights = {0.6f, 0.2f, 0.2f, 0.5f}; //{numUnits} armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5); @@ -1482,28 +1409,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4); armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); - 
AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
-
-    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
+    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
+
+    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
 
     data.m_InputToInputWeights = &inputToInputWeightsTensor;
     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1546,28 +1473,33 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
     cellStateOutHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
-    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
 
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult QuantizedLstmTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape) { IgnoreUnused(memoryManager); - auto numBatches = armnn::numeric_cast(input.shape()[0]); - auto inputSize = armnn::numeric_cast(input.shape()[1]); - auto outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + auto numBatches = armnn::numeric_cast(inputShape[0]); + auto inputSize = armnn::numeric_cast(inputShape[1]); + auto outputSize = armnn::numeric_cast(outputExpectedShape[1]); // Scale/Offset for input/output, cellState In/Out, weights, bias float inputOutputScale = 0.0078125f; @@ -1598,29 +1530,23 @@ LayerTestResult QuantizedLstmTestImpl( inputOutputScale, inputOutputOffset); - LayerTestResult ret(outputStateInfo); - // Input0 std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); // Input1 std::vector cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13 - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); - // Input2 std::vector outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14 - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output0 std::vector cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0 - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); // Output1 std::vector outputVector; // 1 outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -1658,24 +1584,24 @@ LayerTestResult QuantizedLstmTestImpl( armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset); // Weights and bias tensor data - auto inputToInputWeights = MakeTensor(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108}); - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48}); - - auto recurrentToInputWeights = MakeTensor(recurrentWeightsInfo, - {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26}); - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98}); - - auto inputGateBias = MakeTensor(biasInfo, {-7876, 13488, -726, 
32839}); - auto forgetGateBias = MakeTensor(biasInfo, {9206, -46884, -11693, -38724}); - auto cellBias = MakeTensor(biasInfo, {39481, 48624, 48976, -21419}); - auto outputGateBias = MakeTensor(biasInfo, {-58999, -17050, -41852, -40538}); + std::vector inputToInputWeights = {146, 250, 235, 171, 10, 218, 171, 108}; + std::vector inputToForgetWeights = {24, 50, 132, 179, 158, 110, 3, 169}; + std::vector inputToCellWeights = {133, 34, 29, 49, 206, 109, 54, 183}; + std::vector inputToOutputWeights = {195, 187, 11, 99, 109, 10, 218, 48}; + + std::vector recurrentToInputWeights = + {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26}; + std::vector recurrentToForgetWeights = + {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253}; + std::vector recurrentToCellWeights = + {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216}; + std::vector recurrentToOutputWeights = + {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98}; + + std::vector inputGateBias = {-7876, 13488, -726, 32839}; + std::vector forgetGateBias = {9206, -46884, -11693, -38724}; + std::vector cellBias = {39481, 48624, 48976, -21419}; + std::vector outputGateBias = {-58999, -17050, -41852, -40538}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo); @@ -1694,20 +1620,20 @@ LayerTestResult QuantizedLstmTestImpl( armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + 
AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); // Setup queue descriptor data.m_InputToInputWeights = &inputToInputWeightsTensor; @@ -1734,15 +1660,18 @@ LayerTestResult QuantizedLstmTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: CIFG, LayerNorm @@ -1750,8 +1679,8 @@ LayerTestResult QLstmTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -1816,21 +1745,18 @@ LayerTestResult QLstmTestImpl( // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors - std::vector cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); + std::vector cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149}; std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -1873,27 +1799,27 @@ LayerTestResult QLstmTestImpl( armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset); // Weights and bias tensor data - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51}); - auto recurrentToCellWeights = 
MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38}); - - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToForgetWeights = + {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51}; + std::vector recurrentToCellWeights = + {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64}; + std::vector recurrentToOutputWeights = + {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38}; + + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo); @@ -1913,21 +1839,21 @@ LayerTestResult QLstmTestImpl( armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]); + AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data()); // Setup queue descriptor data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1972,15 +1898,18 @@ LayerTestResult QLstmTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: Projection, LayerNorm @@ -1988,8 +1917,8 @@ LayerTestResult QLstmTestImpl1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -2051,26 +1980,21 @@ LayerTestResult QLstmTestImpl1( outputScale, outputOffset); - LayerTestResult ret(outputStateInfo); - // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -2118,36 +2042,31 @@ LayerTestResult QLstmTestImpl1( 0); // Weights and bias tensor data - auto inputToInputWeights = MakeTensor(inputWeightsInfo, - 
{64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13}); - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToInputWeights = MakeTensor(recurrentWeightsInfo, - {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77}); - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}); - - auto inputGateBias = MakeTensor(biasInfo, {644245, 3221226, 4724464, 8160438}); - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto inputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {3277, 6553, 9830, 16384}); - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); - - auto projectionWeights = MakeTensor(projectionWeightsInfo, - {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}); + std::vector inputToInputWeights = + {64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13}; + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToInputWeights = {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77}; + std::vector recurrentToForgetWeights = {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}; + std::vector recurrentToCellWeights = {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}; + std::vector recurrentToOutputWeights = {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}; + + std::vector inputGateBias = {644245, 3221226, 4724464, 8160438}; + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector inputLayerNormWeights = {3277, 6553, 9830, 16384}; + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; + + std::vector projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}; // ScopedTensorHandles armnn::ScopedTensorHandle 
inputToInputWeightsTensor(inputWeightsInfo);
@@ -2173,27 +2092,27 @@ LayerTestResult QLstmTestImpl1(
     armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
 
     // Allocate and copy data
-    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
 
-    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
 
-    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
-    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
-    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
-    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
 
-    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
 
-    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
 
     // Setup queue descriptor
     data.m_InputToInputWeights = &inputToInputWeightsTensor;
@@ -2244,15 +2163,18 @@ LayerTestResult QLstmTestImpl1(
     cellStateOutHandle->Allocate();
outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: Projection, CIFG, LayerNorm @@ -2260,8 +2182,8 @@ LayerTestResult QLstmTestImpl2( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -2323,26 +2245,21 @@ LayerTestResult QLstmTestImpl2( outputScale, outputOffset); - LayerTestResult ret(outputStateInfo); - // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors - std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); + std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -2390,30 +2307,29 @@ LayerTestResult QLstmTestImpl2( 0); // Weights and bias tensor data - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}); - - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - 
auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); - - auto projectionWeights = MakeTensor(projectionWeightsInfo, - {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}); + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToForgetWeights = + {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}; + std::vector recurrentToCellWeights = + {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}; + std::vector recurrentToOutputWeights = + {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}; + + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; + + std::vector projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo); @@ -2435,23 +2351,23 @@ LayerTestResult QLstmTestImpl2( armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]); + AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data()); - AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data()); // Setup queue descriptor data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -2498,15 +2414,18 @@ LayerTestResult QLstmTestImpl2( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } @@ -2519,13 +2438,10 @@ LayerTestResult QLstmTestImpl2( void LstmUtilsZeroVectorTest() { armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {2., 3., 3., 4.})); - - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( - {0., 0., 0., 0.})); + std::vector input = {2., 3., 3., 4.}; + std::vector expectedOutput = {0., 0., 0., 0.}; - return LstmUtilsZeroVectorTestImpl(input, 4, expectedOutput); + return LstmUtilsZeroVectorTestImpl(input, 4, expectedOutput, inputDesc.GetShape()); } void LstmUtilsMeanStddevNormalizationNoneZeroInputTest() @@ -2533,16 +2449,16 @@ void LstmUtilsMeanStddevNormalizationNoneZeroInputTest() uint32_t batchSize = 2; uint32_t vecSize = 4; armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0 - 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1 + std::vector input = + { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0 + 0.9f, 1.0f, 1.1f, 1.2f }; //batch 1 - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( - { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0 - -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1 + std::vector expectedOutput = + { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0 + -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f }; //batch 1 return LstmUtilsMeanStddevNormalizationTestImpl(input, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, inputDesc.GetShape()); } void LstmUtilsMeanStddevNormalizationAllZeroInputTest() @@ -2550,16 +2466,16 @@ void LstmUtilsMeanStddevNormalizationAllZeroInputTest() uint32_t batchSize = 
2; uint32_t vecSize = 4; armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1 + 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1 - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( + std::vector expectedOutput = { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1 + 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1 return LstmUtilsMeanStddevNormalizationTestImpl(input, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, inputDesc.GetShape()); } void LstmUtilsMeanStddevNormalizationMixedZeroInputTest() @@ -2567,16 +2483,16 @@ void LstmUtilsMeanStddevNormalizationMixedZeroInputTest() uint32_t batchSize = 2; uint32_t vecSize = 4; armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1 + std::vector input = + { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 + 0.1f, 0.2f, 0.3f, 0.4f }; //batch 1 - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( - { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1 + std::vector expectedOutput = + { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 + -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }; //batch 1 return LstmUtilsMeanStddevNormalizationTestImpl(input, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, inputDesc.GetShape()); } void LstmUtilsVectorBatchVectorCwiseProductTest() @@ -2584,13 +2500,13 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() uint32_t batchSize = 4; uint32_t vecSize = 29; armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32); - boost::multi_array vector = MakeTensor(vecDesc, std::vector( + std::vector vector = { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f, - 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f})); + 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}; armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array batchVector = MakeTensor(batchVecDesc, std::vector( + std::vector batchVector = { /* batch 0 */ 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f, @@ -2606,10 +2522,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() /* batch 3 */ -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f, -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f, - -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f})); + -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}; // Expect output = input * output + output. 
- boost::multi_array expectedOutput = MakeTensor(batchVecDesc, std::vector( + std::vector expectedOutput = { /* batch 0 */ 1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f, 59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f, @@ -2633,10 +2549,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f, -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f, -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f, - -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f})); + -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}; return LstmUtilsVectorBatchVectorCwiseProductTestImpl(vector, batchVector, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, vecDesc.GetShape()); } void LstmUtilsVectorBatchVectorAddTest() @@ -2644,20 +2560,23 @@ void LstmUtilsVectorBatchVectorAddTest() uint32_t batchSize = 2; uint32_t vecSize = 3; armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32); - boost::multi_array vector = MakeTensor(vecDesc, std::vector( - { 0.0f, -0.5f, 1.0f})); + std::vector vector = { 0.0f, -0.5f, 1.0f}; armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array batchVector = MakeTensor(batchVecDesc, std::vector( - { 1.0f, 2.0f, 3.0f, //batch 0 - 4.0f, 5.0f, 6.0f})); //batch 1 - - boost::multi_array expectedOutput = MakeTensor(batchVecDesc, std::vector( - { 1.0f, 1.5f, 4.0f, - 4.0f, 4.5f, 7.0f})); + std::vector batchVector = + { + 1.0f, 2.0f, 3.0f, //batch 0 + 4.0f, 5.0f, 6.0f //batch 1 + }; + + std::vector expectedOutput = + { + 1.0f, 1.5f, 4.0f, + 4.0f, 4.5f, 7.0f + }; return LstmUtilsVectorBatchVectorAddTestImpl(vector, batchVector, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, batchVecDesc.GetShape()); } #endif @@ -2668,15 +2587,15 @@ LayerTestResult LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - { 2., 3., 3., 4. })); + std::vector input = { 2., 3., 3., 4. 
}; armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + std::vector expectedOutput = {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, - -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})); + -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}; return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape()); } LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest( @@ -2685,19 +2604,18 @@ LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, - 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})); + 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}; armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, - -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, - -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, - 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, - -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f, - 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, - 0.02168f})); + std::vector expectedOutput = + {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, + -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, + -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, + 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, + -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f, + 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f}; return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2708,16 +2626,16 @@ LayerTestResult LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {2., 3., 3., 4.})); + std::vector input = {2., 3., 3., 4.}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, - -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})); + std::vector expectedOutput = + {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, + -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}; return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape()); } LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest( @@ -2726,14 +2644,14 @@ LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLa const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo 
inputDesc({ 2, 5 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0 - 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1 + 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; //batch 1 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + std::vector expectedOutput = { 0.0244077f, 0.128027f, -0.00170918f, //batch 0 - -0.00692428f, 0.0848741f, 0.063445f})); //batch 1 + -0.00692428f, 0.0848741f, 0.063445f}; //batch 1 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2750,22 +2668,20 @@ LayerTestResult LstmLayerInt16NoCifgNoPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = MakeTensor( - inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, - -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset); return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, constantDatatype); } @@ -2781,24 +2697,20 @@ LayerTestResult LstmLayerInt16WithCifgWithPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({ 2, 2 }, datatype); - boost::multi_array input = - MakeTensor( - inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({ 2, 4 }, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, - -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, + -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f + }, + qScale, qOffset); return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, constantDatatype); } LayerTestResult LstmLayerInt16NoCifgWithPeepholeWithProjectionTest( @@ -2813,32 +2725,26 @@ LayerTestResult LstmLayerInt16NoCifgWithPeepholeWithProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({ 2, 5 }, datatype); - boost::multi_array input = - MakeTensor( - 
inputDesc, - armnnUtils::QuantizedVector( - { - 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, - 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f - }, - qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector( + { + 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, + 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f + }, + qScale, qOffset); armnn::TensorInfo outputDesc({ 2, 16 }, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f, - -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f, - 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f, - 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f, - -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f, - -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f, - 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f, - 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f, + -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f, + 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f, + 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f, + -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f, + -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f, + 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f, + 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f + }, + qScale, qOffset); return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); @@ -2855,23 +2761,20 @@ LayerTestResult LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymm16 armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = - MakeTensor(inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, - -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset); return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, datatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, datatype); } // @@ -2884,14 +2787,13 @@ LayerTestResult QuantizedLstmTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {166, 179, 50, 150})); + std::vector input = {166, 179, 50, 150}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {140, 151, 146, 112, 136, 156, 142, 112 })); + std::vector expectedOutput = {140, 151, 146, 112, 136, 156, 142, 112 }; - return 
QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
+    return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory,
+                                 input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
 }
 
 // QLSTM
@@ -2901,12 +2803,10 @@ LayerTestResult<int8_t, 2> QLstmTest(
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
-        {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+    std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
 
     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
-        {-15, 21, 14, 20, -15, 15, 5, 27}));
+    std::vector<int8_t> expectedOutput = {-15, 21, 14, 20, -15, 15, 5, 27};
 
     return QLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
@@ -2917,12 +2817,10 @@ LayerTestResult<int8_t, 2> QLstmTest1(
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
-        {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+    std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
 
     armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
-        {127, 127, -108, -67, 127, 127}));
+    std::vector<int8_t> expectedOutput = {127, 127, -108, -67, 127, 127};
 
     return QLstmTestImpl1(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
@@ -2933,12 +2831,10 @@ LayerTestResult<int8_t, 2> QLstmTest2(
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
-        {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+    std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
 
     armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
-    boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
-        {127, 127, 127, -128, 127, 127}));
+    std::vector<int8_t> expectedOutput = {127, 127, 127, -128, 127, 127};
 
     return QLstmTestImpl2(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
\ No newline at end of file
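The pattern every hunk above applies is the same: data that used to live in a boost::multi_array is now stored as a flat std::vector, and the tensor shape is passed alongside it so the comparison can still validate dimensions. Below is a minimal standalone sketch of that flat-vector-plus-shape comparison idea, assuming hypothetical names (CompareFlat, Shape, and the tolerance default); it is not the Arm NN CompareTensors implementation.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for armnn::TensorShape in this sketch.
using Shape = std::vector<unsigned int>;

// Number of elements implied by a shape, e.g. {2, 4} -> 8.
std::size_t NumElements(const Shape& shape)
{
    std::size_t count = 1;
    for (unsigned int dim : shape)
    {
        count *= dim;
    }
    return count;
}

// True when the shapes match and every element agrees within 'tolerance'.
template <typename T>
bool CompareFlat(const std::vector<T>& actual,
                 const std::vector<T>& expected,
                 const Shape& actualShape,
                 const Shape& expectedShape,
                 float tolerance = 1e-6f)
{
    if (actualShape != expectedShape ||
        actual.size() != expected.size() ||
        actual.size() != NumElements(actualShape))
    {
        return false;
    }
    for (std::size_t i = 0; i < actual.size(); ++i)
    {
        if (std::fabs(static_cast<float>(actual[i]) - static_cast<float>(expected[i])) > tolerance)
        {
            return false;
        }
    }
    return true;
}

int main()
{
    // A 2x4 batch stored flat, the way the rewritten LSTM tests now hold their data.
    std::vector<float> expected = { -15.f, 21.f, 14.f, 20.f, -15.f, 15.f, 5.f, 27.f };
    std::vector<float> actual   = expected;
    Shape shape = { 2, 4 };

    std::cout << std::boolalpha
              << CompareFlat(actual, expected, shape, shape) << std::endl; // prints: true
    return 0;
}

Storing the reference data flat and carrying the shape separately is what lets the tests drop boost::multi_array without losing the dimensional check.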