22 const bool biasEnabled,
24 const bool transposeWeights)
27 unsigned int outputSize = rOutputShape[1];
29 const std::vector<float> decodedInputs = rInputDecoder.
DecodeTensor(rInputShape);
30 const std::vector<float> decodedWeights = rWeightDecoder.
DecodeTensor(rWeightsShape);
35 const std::vector<float> decodedBiases = biasEnabled ? pBiasDecoder->
DecodeTensor(biasShape) : std::vector<float>();
38 for (
unsigned int n = 0; n < rInputShape[0]; n++)
40 for (
unsigned int channelOutput = 0; channelOutput < outputSize; channelOutput++)
44 for (
unsigned int channelInput = 0; channelInput < K; channelInput++)
49 weight = decodedWeights[channelOutput * K + channelInput];
53 weight = decodedWeights[channelInput * outputSize + channelOutput];
56 outval += weight * decodedInputs[n * K + channelInput];
61 outval += decodedBiases[channelOutput];
64 rOutputEncoder[n * outputSize + channelOutput];
65 rOutputEncoder.
Set(outval);
virtual std::vector< float > DecodeTensor(const TensorShape &tensorShape, bool isDepthwise=false)=0
virtual void Set(IType right)=0
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_ASSERT(COND)
void FullyConnected(const TensorShape &rInputShape, Decoder< float > &rInputDecoder, const TensorShape &rOutputShape, Encoder< float > &rOutputEncoder, const TensorShape &rWeightsShape, Decoder< float > &rWeightDecoder, Decoder< float > *pBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
Performs a fully-connected (dense) layer computation: a matrix multiplication of the input with the weights, optionally followed by adding a per-output-channel bias.