ArmNN 22.11
NeonConvertFp32ToBf16Workload.hpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

namespace armnn
{

// Neon workload that converts a Float32 input tensor to BFloat16.
class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
{
public:
    NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info);
    virtual void Execute() const override;
    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
    // Input/output tensor handle pairs that the workload operates on.
    std::vector<TensorHandlePair> m_TensorHandlePairs;
    virtual void Reconfigure();
};

} //namespace armnn
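
The header above only declares the interface. As a rough usage sketch (not code taken from the ArmNN tree), the snippet below shows how a caller inside the Neon backend might populate the queue descriptor and workload info for a single tensor and run the conversion once. The helper function RunFp32ToBf16Conversion and the include paths are assumptions for illustration; the member names used (m_Inputs, m_Outputs, m_InputTensorInfos, m_OutputTensorInfos) follow the common armnn::QueueDescriptor and armnn::WorkloadInfo layout.

// Hedged usage sketch; the include paths are assumptions based on the ArmNN source layout.
#include "NeonConvertFp32ToBf16Workload.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>

namespace
{
// Wires one Fp32 input handle to one Bf16 output handle and runs the workload once.
void RunFp32ToBf16Conversion(armnn::ITensorHandle* inputHandle,
                             armnn::ITensorHandle* outputHandle,
                             const armnn::TensorInfo& inputInfo,
                             const armnn::TensorInfo& outputInfo)
{
    armnn::ConvertFp32ToBf16QueueDescriptor descriptor;
    descriptor.m_Inputs.push_back(inputHandle);   // Float32 source
    descriptor.m_Outputs.push_back(outputHandle); // BFloat16 destination

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos.push_back(inputInfo);
    info.m_OutputTensorInfos.push_back(outputInfo);

    armnn::NeonConvertFp32ToBf16Workload workload(descriptor, info);
    workload.Execute();

    // If the graph later rebinds memory, a handle can be swapped without
    // rebuilding the workload, e.g.:
    //     workload.ReplaceInputTensorHandle(newInputHandle, 0);
}
} // anonymous namespace

In practice the descriptor and workload info come from the backend's workload factory rather than being built by hand; the sketch only makes the data flow through the declared interface explicit.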