20.02
NeonConvertFp16ToFp32Workload.hpp
Go to the documentation of this file.
1
//
2
// Copyright © 2017 Arm Ltd. All rights reserved.
3
// SPDX-License-Identifier: MIT
4
//
5
6
#pragma once
7
8
#include <
backendsCommon/Workload.hpp
>
9
#include <
backendsCommon/WorkloadData.hpp
>
10
#include <
neon/workloads/NeonWorkloadUtils.hpp
>
11
12
namespace armnn
{

/// Neon backend workload that converts tensor data from 16-bit floating
/// point (FP16) to 32-bit floating point (FP32).
/// Derives from Float16ToFloat32Workload (see backendsCommon/Workload.hpp),
/// which fixes the expected input data type to Float16 and the output data
/// type to Float32 for this queue-descriptor type.
class NeonConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>
{
public:
    /// @param descriptor Queue descriptor for the FP16 -> FP32 conversion
    ///                   (see WorkloadData.hpp).
    /// @param info       Contains information about the inputs and outputs
    ///                   of the layer.
    /// Defined in NeonConvertFp16ToFp32Workload.cpp.
    NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);

    /// Runs the conversion for each recorded input/output tensor-handle pair.
    /// Defined in NeonConvertFp16ToFp32Workload.cpp.
    virtual void Execute() const override;

private:
    /// Pairing of (source input handle, destination output handle);
    /// the source side is read-only (const).
    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;

    // Handle pairs the workload operates on — presumably populated by the
    // constructor from the descriptor; confirm against the .cpp definition.
    std::vector<TensorHandlePair> m_TensorHandlePairs;
};

} //namespace armnn
WorkloadData.hpp
armnn::NeonConvertFp16ToFp32Workload::Execute
virtual void Execute() const override
Definition:
NeonConvertFp16ToFp32Workload.cpp:25
armnn
Copyright (c) 2020 ARM Limited.
Definition:
00_introduction.dox:25
armnn::MultiTypedWorkload
Definition:
Workload.hpp:105
armnn::ConvertFp16ToFp32QueueDescriptor
Definition:
WorkloadData.hpp:410
NeonWorkloadUtils.hpp
armnn::NeonConvertFp16ToFp32Workload
Definition:
NeonConvertFp16ToFp32Workload.hpp:15
Workload.hpp
armnn::BoostLogSeverityMapping::info
armnn::NeonConvertFp16ToFp32Workload::NeonConvertFp16ToFp32Workload
NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info)
Definition:
NeonConvertFp16ToFp32Workload.cpp:17
armnn::WorkloadInfo
Contains information about inputs and outputs to a layer.
Definition:
WorkloadInfo.hpp:16
src
backends
neon
workloads
NeonConvertFp16ToFp32Workload.hpp
Generated on Fri Mar 13 2020 16:09:12 for ArmNN by Doxygen
1.8.13