20 const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
21 m_WeightShape = rWeightInfo.GetShape();
22 m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
26 m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
27 const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
28 m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
37 m_InputDecoder = MakeDecoder<float>(inputInfo);
40 m_OutputShape = outputInfo.GetShape();
41 m_OutputEncoder = MakeEncoder<float>(outputInfo);
46 m_NumActivations *= inputInfo.GetShape()[i];
const TensorShape & GetShape() const
CPU Execution: Reference C++ kernels.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
const FullyConnectedQueueDescriptor m_Data
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Copyright (c) 2020 ARM Limited.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void PostAllocationConfigure() override
virtual void Execute() const override
bool m_BiasEnabled
Enable/disable bias.
RefFullyConnectedWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
const ConstCpuTensorHandle * m_Bias
unsigned int GetNumDimensions() const