From 270641b6feb12cd68c812060b9828ac7d87d4826 Mon Sep 17 00:00:00 2001 From: Narumol Prangnawarat Date: Mon, 22 May 2023 10:57:47 +0100 Subject: MLCE-1022 Fix failure on UnidirectionalSequenceLstm Operator * Fix failure to parse UnidirectionalSequenceLstm Operator on CpuAcc * Fix failure to parse UnidirectionalSequenceLstm Operator on GpuAcc * Fix IsLayerSupported tests when there are multiple outputs Signed-off-by: Narumol Prangnawarat Change-Id: Ia690f34d3c7fae87bd36c97056a3ff71baa865f6 --- src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 6 +++--- .../cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp | 2 +- .../neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 5475762a53..182fab97be 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -819,16 +819,16 @@ bool IsLayerSupportedTest(FactoryType *factory, Tag) armnn::TensorInfo output = MakeDummyTensorInfo(); previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output); // Connect all outputs of the previous layer to inputs of tested layer. - for (unsigned int i = 0; i < numIn; i++) + for (unsigned int i = 0; i < numIn; ++i) { armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0); armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i); previousLayerOutputSlot.Connect(layerInputSlot); } // Set outputs of tested layer to a dummy tensor. 
- for (unsigned int i = 0; i < numOut; i++) + for (unsigned int i = 0; i < numOut; ++i) { - layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output); + layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output); } std::string layerName = LayerPolicy::NameStr; diff --git a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp index ac24120804..289442e1cc 100644 --- a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp +++ b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp @@ -517,7 +517,7 @@ ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo& input, IgnoreUnused(hiddenStateOutput, cellStateOutput); TensorShape inputLayerShape = input.GetShape(); - TensorShape outputLayerShape = outputStateIn.GetShape(); + TensorShape outputLayerShape = output.GetShape(); unsigned int maxTime = descriptor.m_TimeMajor?inputLayerShape[0]:inputLayerShape[1]; unsigned int batchSize = descriptor.m_TimeMajor?inputLayerShape[1]:inputLayerShape[0]; diff --git a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp index 8dba719d91..7bdb2d5a5a 100644 --- a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp @@ -517,7 +517,7 @@ NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo& input, const LstmInputParamsInfo& paramsInfo) { TensorShape inputLayerShape = input.GetShape(); - TensorShape outputLayerShape = outputStateIn.GetShape(); + TensorShape outputLayerShape = output.GetShape(); unsigned int maxTime = descriptor.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1]; unsigned int batchSize = descriptor.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0]; -- cgit v1.2.1