21.02
NeonConvertFp32ToBf16Workload.hpp
Go to the documentation of this file.
1
//
2
// Copyright © 2020 Arm Ltd. All rights reserved.
3
// SPDX-License-Identifier: MIT
4
//
5
6
#pragma once
7
8
#include <
backendsCommon/Workload.hpp
>
9
#include <
backendsCommon/WorkloadData.hpp
>
10
#include <
neon/workloads/NeonWorkloadUtils.hpp
>
11
12
namespace armnn
{

/// Neon-backend workload that converts a Float32 tensor to BFloat16.
/// Declaration only: the constructor and Execute() are defined in
/// NeonConvertFp32ToBf16Workload.cpp (per the generated cross-references,
/// at lines 18 and 26 respectively).
class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
{
public:
    /// @param descriptor Queue descriptor for the Fp32->Bf16 conversion
    ///                   (declared in WorkloadData.hpp).
    /// @param info       Contains information about inputs and outputs to the layer.
    NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor,
                                  const WorkloadInfo& info);

    /// Runs the conversion. Defined in the .cpp — presumably iterates
    /// m_TensorHandlePairs; confirm against the definition.
    virtual void Execute() const override;

private:
    // (source handle, destination handle) — the source side is read-only.
    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;

    // Input/output handle pairs this workload operates on.
    // NOTE(review): presumably populated by the constructor from the
    // descriptor's tensors — verify in the .cpp.
    std::vector<TensorHandlePair> m_TensorHandlePairs;
};

} //namespace armnn
WorkloadData.hpp
armnn::NeonConvertFp32ToBf16Workload::Execute
virtual void Execute() const override
Definition:
NeonConvertFp32ToBf16Workload.cpp:26
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition:
01_00_software_tools.dox:6
armnn::NeonConvertFp32ToBf16Workload::NeonConvertFp32ToBf16Workload
NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor &descriptor, const WorkloadInfo &info)
Definition:
NeonConvertFp32ToBf16Workload.cpp:18
armnn::NeonConvertFp32ToBf16Workload
Definition:
NeonConvertFp32ToBf16Workload.hpp:15
armnn::MultiTypedWorkload
Definition:
Workload.hpp:105
NeonWorkloadUtils.hpp
Workload.hpp
armnn::BoostLogSeverityMapping::info
armnn::ConvertFp32ToBf16QueueDescriptor
Definition:
WorkloadData.hpp:444
armnn::WorkloadInfo
Contains information about inputs and outputs to a layer.
Definition:
WorkloadInfo.hpp:16
src
backends
neon
workloads
NeonConvertFp32ToBf16Workload.hpp
Generated on Fri Mar 19 2021 15:26:03 for ArmNN by
1.8.13