//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonFloorFloatWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <arm_compute/runtime/NEON/functions/NEFloor.h>

#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : FloatWorkload<FloorQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);

    // Get the underlying Arm Compute tensors for the single input and output.
    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // Configure the Arm Compute NEFloor function to run on these tensors.
    auto layer = std::make_unique<arm_compute::NEFloor>();
    layer->configure(&input, &output);
    m_Layer.reset(layer.release());
}

void NeonFloorFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFloorFloatWorkload_Execute", this->GetGuid());
    m_Layer->run();
}

// Replace input tensor handle with the given TensorHandle
void NeonFloorFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void NeonFloorFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

void NeonFloorFloatWorkload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} //namespace armnn