27 std::vector<ITensorHandle*> outputs)
35 m_WeightShape = rWeightInfo.GetShape();
36 m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
41 m_BiasDecoder = MakeDecoder<float>(biasInfo);
45 m_OutputShape = outputInfo.GetShape();
50 m_NumActivations *= inputInfo.GetShape()[i];
70 std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
71 std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
73 m_WeightDecoder->Reset(inputs[1]->Map());
76 m_BiasDecoder->Reset(inputs[2]->Map());
const TensorShape & GetShape() const
CPU Execution: Reference C++ kernels.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Copyright (c) 2021 ARM Limited and Contributors.
LayerDescriptor m_Parameters
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void PostAllocationConfigure() override
std::vector< ITensorHandle * > m_Inputs
void Execute() const override
FullyConnectedQueueDescriptor m_Data
bool m_BiasEnabled
Enable/disable bias.
#define ARMNN_ASSERT(COND)
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
RefFullyConnectedWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Outputs
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
unsigned int GetNumDimensions() const