ArmNN 21.08
RefFullyConnectedWorkload.cpp
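Source listing of the FullyConnected workload for the reference backend (CpuRef: reference C++ kernels executed on the CPU).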
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFullyConnectedWorkload.hpp"

#include "FullyConnected.hpp"
#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

namespace armnn
{

RefFullyConnectedWorkload::RefFullyConnectedWorkload(
    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
{
}

void RefFullyConnectedWorkload::PostAllocationConfigure()
{
    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
}

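// Caches the input, weight, and output shapes and builds float decoders for
// the weight and (optional) bias tensors once backing memory is in place.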
void RefFullyConnectedWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
                                                        std::vector<ITensorHandle*> outputs)
{
    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
    m_InputShape = inputInfo.GetShape();

    const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
    ARMNN_ASSERT(rWeightInfo.GetNumDimensions() > 1);
    m_WeightShape = rWeightInfo.GetShape();
    m_WeightDecoder = MakeDecoder<float>(rWeightInfo);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
        m_BiasDecoder = MakeDecoder<float>(biasInfo);
    }

    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
    m_OutputShape = outputInfo.GetShape();

    m_NumActivations = 1; // Total number of activations in the input.
    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
    {
        m_NumActivations *= inputInfo.GetShape()[i];
    }
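    // E.g. for a (hypothetical) input shape of [2, 512], the loop above skips
    // the batch dimension and yields m_NumActivations = 512 per batch.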
}

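// Synchronous entry point: runs on the workload's own input/output handles.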
void RefFullyConnectedWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

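// Asynchronous entry point: the tensor handles live in the caller-supplied
// WorkingMemDescriptor, so shapes and decoders are (re)configured first.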
void RefFullyConnectedWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);

    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

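// Shared implementation: maps the buffers, decodes input/weights/bias to
// float, runs the reference FullyConnected kernel, and encodes the result.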
void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");

    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
    std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());

    m_WeightDecoder->Reset(inputs[1]->Map());
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasDecoder->Reset(inputs[2]->Map());
    }

    FullyConnected(m_InputShape,
                   *inputDecoder,
                   m_OutputShape,
                   *OutputEncoder,
                   m_WeightShape,
                   *m_WeightDecoder,
                   m_BiasDecoder.get(),
                   m_Data.m_Parameters.m_BiasEnabled,
                   m_NumActivations,
                   m_Data.m_Parameters.m_TransposeWeightMatrix);
}

} //namespace armnn
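For orientation, the FullyConnected kernel invoked above implements a standard fully connected (inner product) layer: each output element is a bias plus the dot product of one input row with one weight row. The standalone sketch below mirrors that arithmetic on raw float buffers. It is an illustration only: the function name, the plain-float signature, and the fixed row-major [outputSize, inputSize] weight layout are assumptions made here for brevity, whereas ArmNN's actual kernel reads and writes through Decoder/Encoder objects so it can serve all supported data types, and honours m_TransposeWeightMatrix.

#include <cstddef>
#include <vector>

// Hypothetical reference implementation:
//   output[b][o] = bias[o] + sum_i input[b][i] * weight[o][i]
std::vector<float> FullyConnectedSketch(const std::vector<float>& input,   // batches x inputSize
                                        const std::vector<float>& weights, // outputSize x inputSize (assumed layout)
                                        const std::vector<float>& biases,  // outputSize, or empty if no bias
                                        std::size_t batches,
                                        std::size_t inputSize,
                                        std::size_t outputSize)
{
    std::vector<float> output(batches * outputSize);
    for (std::size_t b = 0; b < batches; ++b)
    {
        for (std::size_t o = 0; o < outputSize; ++o)
        {
            // Start from the bias (zero when the layer has no bias).
            float sum = biases.empty() ? 0.0f : biases[o];
            for (std::size_t i = 0; i < inputSize; ++i)
            {
                sum += input[b * inputSize + i] * weights[o * inputSize + i];
            }
            output[b * outputSize + o] = sum;
        }
    }
    return output;
}

With batches = 2 and inputSize = 512, this consumes exactly the m_NumActivations = 512 values per batch that PostAllocationConfigure computes above.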