diff options
author | Derek Lamberti <derek.lamberti@arm.com> | 2019-06-13 17:34:19 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-06-14 16:41:17 +0000 |
commit | c81855f6ca52eb025a303b95eee7a12a5e2f9557 (patch) | |
tree | 86ac342839f49348b54f29c4588b6a9e4059cd84 /src/backends/neon/workloads/NeonLstmFloatWorkload.cpp | |
parent | db48288f54ad441f34108cf270ed3089d3a465ea (diff) | |
download | armnn-c81855f6ca52eb025a303b95eee7a12a5e2f9557.tar.gz |
IVGCVSW-3278 Cl and Neon TensorHandles inherit from common base interface
Change-Id: Ia68da09d8f0fb0a04af9cb61062d7edaa5f1b887
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Diffstat (limited to 'src/backends/neon/workloads/NeonLstmFloatWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonLstmFloatWorkload.cpp | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp index 1ab269ff56..c7f5f090ce 100644 --- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp @@ -97,13 +97,13 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get()); } - const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - const arm_compute::ITensor& output_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - const arm_compute::ITensor& cell_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[2])->GetTensor(); + const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + const arm_compute::ITensor& cell_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor(); - arm_compute::ITensor& output_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[1])->GetTensor(); - arm_compute::ITensor& cell_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[2])->GetTensor(); - arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[3])->GetTensor(); + arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor(); + arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor(); + arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor(); // Get the batch_size and the num_units from the cellStateIn dimensions const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2]; |