//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "ClFullyConnectedFloat32Workload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
namespace armnn
{
using namespace armcomputetensorutils;
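
// Float32 fully connected (dense) layer workload for the GpuAcc backend. It wraps an
// Arm Compute Library CL fully connected function: the constructor configures the
// function once, and Execute() enqueues it on the OpenCL command queue.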
ClFullyConnectedFloat32Workload::ClFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info)
    : Float32Workload<FullyConnectedQueueDescriptor>(descriptor, info)
{
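    // Set up ACL tensor metadata (shape and data type) for the constant weights, and for
    // the bias when the layer has one. The bias tensor is optional, hence the pointer.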
    BuildArmComputeTensor(m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());

    arm_compute::CLTensor* optionalBiasTensor = nullptr;
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        BuildArmComputeTensor(m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
        optionalBiasTensor = &m_BiasesTensor;
    }
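
    // A fully connected layer takes exactly one (non-constant) input tensor and produces one output.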
    m_Data.ValidateInputsOutputs("ClFullyConnectedFloat32Workload", 1, 1);

    // Fetch the underlying OpenCL tensors from the workload's input/output tensor handles.
    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    // Configure the ACL function with the input, weights, optional bias and output tensors.
    m_FullyConnected.configure(
        &input, &m_WeightsTensor, optionalBiasTensor, &output, m_Data.m_Parameters.m_TransposeWeightMatrix);
    // Allocate device memory for the constant tensors and copy the weight (and bias) data across.
    InitialiseArmComputeClTensorData(m_WeightsTensor, m_Data.m_Weight->GetConstTensor<float>());

    if (optionalBiasTensor)
    {
        InitialiseArmComputeClTensorData(*optionalBiasTensor, m_Data.m_Bias->GetConstTensor<float>());
    }
}
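
// Enqueues the pre-configured fully connected function; the work runs on the GPU via OpenCL.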
void ClFullyConnectedFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClFullyConnectedFloat32Workload_Execute");
    m_FullyConnected.run();
}
} //namespace armnn