//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

// NOTE: the original include list was lost; the paths below are what this header
// needs and assume the pre-22.05 Arm NN source layout (backendsCommon/).
#include <armnn/LstmParams.hpp>

#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h>

#include <memory>

namespace armnn
{

class NeonQuantizedLstmWorkload : public BaseWorkload<QuantizedLstmQueueDescriptor>
{
public:
    using BaseWorkload<QuantizedLstmQueueDescriptor>::m_Data;

    NeonQuantizedLstmWorkload(const QuantizedLstmQueueDescriptor& descriptor, const WorkloadInfo& info);
    virtual void Execute() const override;

private:
    // Underlying Compute Library function that runs the quantized LSTM on Neon.
    mutable arm_compute::NELSTMLayerQuantized m_QuantizedLstmLayer;

    // Local ACL tensors holding the weights, biases and cell/output state.
    std::unique_ptr<arm_compute::Tensor> m_InputToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToOutputWeightsTensor;

    std::unique_ptr<arm_compute::Tensor> m_RecurrentToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToOutputWeightsTensor;

    std::unique_ptr<arm_compute::Tensor> m_InputGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_ForgetGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputGateBiasTensor;

    std::unique_ptr<arm_compute::Tensor> m_CellStateInTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputStateInTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellStateOutTensor;

    // Releases the local tensors that are no longer needed once the layer is configured.
    void FreeUnusedTensors();
};

arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& outputStateIn,
                                                      const TensorInfo& cellStateIn,
                                                      const TensorInfo& outputStateOut,
                                                      const TensorInfo& cellStateOut,
                                                      const QuantizedLstmInputParamsInfo& paramsInfo);

} //namespace armnn