ArmNN 22.08
NeonConvertFp16ToFp32Workload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

// (#include directives elided in the original listing)

namespace armnn
{

class NeonConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>
{
public:
    NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
    virtual void Execute() const override;

    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
    std::vector<TensorHandlePair> m_TensorHandlePairs;

    virtual void Reconfigure();
};

} //namespace armnn
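The header above only declares the interface, so the standalone sketch below illustrates how a workload of this shape could plausibly behave: the constructor pairs each FP16 input handle with its FP32 output handle, Execute() converts and copies each pair, and ReplaceInputTensorHandle() swaps the handle referenced by the descriptor and then calls Reconfigure(), restoring the previous handle if reconfiguration fails. This is a minimal sketch under assumptions, not the ArmNN 22.08 implementation: ITensorHandleStub, QueueDescriptorStub, ConvertFp16ToFp32WorkloadSketch, and HalfBitsToFloat are invented stand-ins, and the real workload would use ArmNN's tensor handle, profiling, and optimised conversion utilities instead.

// Illustrative stand-ins; none of these types are ArmNN API.
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <utility>
#include <vector>

struct ITensorHandleStub              // hypothetical handle: raw FP16 or FP32 storage
{
    std::vector<uint16_t> fp16Data;   // used when the handle holds FP16 data
    std::vector<float>    fp32Data;   // used when the handle holds FP32 data
};

struct QueueDescriptorStub            // hypothetical descriptor: mirrors m_Inputs/m_Outputs
{
    std::vector<ITensorHandleStub*> m_Inputs;
    std::vector<ITensorHandleStub*> m_Outputs;
};

class ConvertFp16ToFp32WorkloadSketch
{
public:
    explicit ConvertFp16ToFp32WorkloadSketch(QueueDescriptorStub data)
        : m_Data(std::move(data))
    {
        Reconfigure();                // build the initial input/output pairing
    }

    // Convert every paired FP16 input into its FP32 output.
    void Execute() const
    {
        for (const auto& pair : m_TensorHandlePairs)
        {
            const ITensorHandleStub* src = pair.first;
            ITensorHandleStub* dst = pair.second;
            dst->fp32Data.resize(src->fp16Data.size());
            for (std::size_t i = 0; i < src->fp16Data.size(); ++i)
            {
                dst->fp32Data[i] = HalfBitsToFloat(src->fp16Data[i]);
            }
        }
    }

    // Swap the handle in the given input slot, then rebuild the pairing;
    // if rebuilding fails, restore the previous handle and rethrow.
    // ReplaceOutputTensorHandle would mirror this using m_Data.m_Outputs.
    void ReplaceInputTensorHandle(ITensorHandleStub* tensorHandle, unsigned int slot)
    {
        ITensorHandleStub* backup = m_Data.m_Inputs[slot];
        m_Data.m_Inputs[slot] = tensorHandle;
        try
        {
            Reconfigure();
        }
        catch (...)
        {
            m_Data.m_Inputs[slot] = backup;
            throw;
        }
    }

private:
    // Re-derive the (input, output) handle pairs from the descriptor.
    void Reconfigure()
    {
        if (m_Data.m_Inputs.size() != m_Data.m_Outputs.size())
        {
            throw std::runtime_error("input/output count mismatch");
        }
        m_TensorHandlePairs.clear();
        for (std::size_t i = 0; i < m_Data.m_Inputs.size(); ++i)
        {
            m_TensorHandlePairs.emplace_back(m_Data.m_Inputs[i], m_Data.m_Outputs[i]);
        }
    }

    // Minimal IEEE 754 half -> float conversion covering zero and normal values
    // only; a real backend would use an optimised, often vectorised, routine.
    static float HalfBitsToFloat(uint16_t h)
    {
        const uint32_t sign     = static_cast<uint32_t>(h & 0x8000u) << 16;
        const uint32_t exponent = (h >> 10) & 0x1Fu;
        const uint32_t mantissa = h & 0x3FFu;
        uint32_t bits = sign;                                     // +/- zero by default
        if (exponent != 0)
        {
            bits |= ((exponent + 112u) << 23) | (mantissa << 13); // rebias 15 -> 127
        }
        float f;
        std::memcpy(&f, &bits, sizeof(f));
        return f;
    }

    using TensorHandlePair = std::pair<const ITensorHandleStub*, ITensorHandleStub*>;
    QueueDescriptorStub m_Data;
    std::vector<TensorHandlePair> m_TensorHandlePairs;
};

The sketch suggests why the header exposes Reconfigure() alongside the two Replace*TensorHandle overrides: swapping a single handle and re-deriving the input/output pairs appears intended to let the runtime substitute tensor memory without rebuilding the whole workload, though the exact reconfiguration steps in the Neon backend are not shown in this header.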