ArmNN 21.05
RefFullyConnectedWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFullyConnectedWorkload.hpp"

#include "FullyConnected.hpp"
#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

namespace armnn
{
RefFullyConnectedWorkload::RefFullyConnectedWorkload(
    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
        : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
{
    if (descriptor.m_Parameters.m_ConstantWeights)
    {
        // Weights are constant: copy them into a scoped handle and build the
        // float decoder once, up front.
        m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
        const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
        m_WeightShape = rWeightInfo.GetShape();
        m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));

        if (descriptor.m_Parameters.m_BiasEnabled)
        {
            m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
            const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
            m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
        }
    }
}
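
// With non-constant weights (m_ConstantWeights == false) the weight and bias
// decoders cannot be built in the constructor; they are created in
// PostAllocationConfigure() below, once the input tensor handles are known.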
void RefFullyConnectedWorkload::PostAllocationConfigure()
{
    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefFullyConnectedWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
                                                        std::vector<ITensorHandle*> outputs)
{
    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
    m_InputShape = inputInfo.GetShape();

    if (!m_Data.m_Parameters.m_ConstantWeights)
    {
        const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
        ARMNN_ASSERT(rWeightInfo.GetNumDimensions() > 1);
        m_WeightShape = rWeightInfo.GetShape();
        m_WeightDecoder = MakeDecoder<float>(rWeightInfo);

        if (m_Data.m_Parameters.m_BiasEnabled)
        {
            const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
            m_BiasDecoder = MakeDecoder<float>(biasInfo);
        }
    }

    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
    m_OutputShape = outputInfo.GetShape();

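    // Flatten every dimension after the batch dimension into one activation
    // count: e.g. a [4, 2, 3] input gives 2 * 3 = 6 activations per batch
    // element, so the kernel treats the input as a [4, 6] matrix.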
    m_NumActivations = 1; // Total number of activations in the input.
    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
    {
        m_NumActivations *= inputInfo.GetShape()[i];
    }
}

void RefFullyConnectedWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefFullyConnectedWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);

    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");

    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());

    if (!m_Data.m_Parameters.m_ConstantWeights)
    {
        // Non-constant weights/bias live in the input handles, which may have
        // been remapped since the last run; re-point the decoders at them.
        m_WeightDecoder->Reset(inputs[1]->Map());
        if (m_Data.m_Parameters.m_BiasEnabled)
        {
            m_BiasDecoder->Reset(inputs[2]->Map());
        }
    }

    FullyConnected(m_InputShape,
                   *inputDecoder,
                   m_OutputShape,
                   *outputEncoder,
                   m_WeightShape,
                   *m_WeightDecoder,
                   *m_BiasDecoder,
                   m_Data.m_Parameters.m_BiasEnabled,
                   m_NumActivations,
                   m_Data.m_Parameters.m_TransposeWeightMatrix);
}

} //namespace armnn
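
For orientation, the FullyConnected kernel invoked above reduces to one matrix product per batch element. The sketch below is a minimal, self-contained approximation of that computation for plain float tensors; the function name FullyConnectedSketch and the two weight layouts are illustrative assumptions, and the real kernel instead streams values through the Decoder/Encoder objects so that quantized data types decode to float on the fly.

#include <cstddef>
#include <vector>

// Illustrative sketch only, not the ArmNN kernel: for each batch element n,
// output[n][c] = sum_k input[n][k] * W(c, k), plus an optional bias[c].
std::vector<float> FullyConnectedSketch(const std::vector<float>& input,   // [batchSize, numActivations]
                                        const std::vector<float>& weights, // layout depends on transposeWeights
                                        const std::vector<float>& biases,  // [outputSize]; empty if bias disabled
                                        std::size_t batchSize,
                                        std::size_t numActivations,
                                        std::size_t outputSize,
                                        bool transposeWeights)
{
    std::vector<float> output(batchSize * outputSize, 0.0f);
    for (std::size_t n = 0; n < batchSize; ++n)
    {
        for (std::size_t c = 0; c < outputSize; ++c)
        {
            float sum = 0.0f;
            for (std::size_t k = 0; k < numActivations; ++k)
            {
                // Assumed layouts: [outputSize, numActivations] when
                // transposeWeights is set, [numActivations, outputSize] otherwise.
                const float w = transposeWeights ? weights[c * numActivations + k]
                                                 : weights[k * outputSize + c];
                sum += input[n * numActivations + k] * w;
            }
            if (!biases.empty())
            {
                sum += biases[c]; // cf. m_BiasEnabled in the workload above
            }
            output[n * outputSize + c] = sum;
        }
    }
    return output;
}

Note how m_NumActivations from PostAllocationConfigure plays the role of the inner dimension (numActivations here): every dimension after the batch dimension has already been folded into it before the kernel runs.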