diff options
Diffstat (limited to 'src/backends/neon/workloads/NeonQuantizeWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonQuantizeWorkload.cpp | 50 |
1 file changed, 50 insertions, 0 deletions
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp new file mode 100644 index 0000000000..ef24a7f40b --- /dev/null +++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp @@ -0,0 +1,50 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "NeonQuantizeWorkload.hpp" +#include "NeonWorkloadUtils.hpp" + +#include <neon/NeonTensorHandle.hpp> +#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <arm_compute/core/Types.h> + +#include <boost/polymorphic_pointer_cast.hpp> + +namespace armnn +{ +using namespace armcomputetensorutils; + +arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output) +{ + const arm_compute::TensorInfo neonInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output); + + return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo); +} + +NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, + const WorkloadInfo& workloadInfo) + : BaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo) +{ + arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<INeonTensorHandle>( + m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>( + m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.reset(new arm_compute::NEQuantizationLayer()); + m_Layer->configure(&input, &output); + m_Layer->prepare(); +} + +void NeonQuantizeWorkload::Execute() const +{ + if (m_Layer) + { + ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonQuantizeWorkload_Execute"); + m_Layer->run(); + } +} + +} // namespace armnn |