template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Weights and bias are handed to the workload as constant tensors.
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);

    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
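// For reference, the fully connected workload exercised above computes a plain
// inner product over the flattened input. The helper below is a minimal sketch of
// that reference semantics, not ArmNN code: the [inputSize, outputSize] weight
// layout and every name here are assumptions, with transposeWeights flipping the
// indexing the same way the tests below do (assumes <vector> is included).
namespace
{

std::vector<float> ReferenceFullyConnected(const std::vector<float>& input,   // [batchSize * inputSize]
                                           const std::vector<float>& weights, // layout as noted above
                                           const std::vector<float>& bias,    // [outputSize]
                                           unsigned int batchSize,
                                           unsigned int inputSize,
                                           unsigned int outputSize,
                                           bool biasEnabled,
                                           bool transposeWeights)
{
    std::vector<float> output(batchSize * outputSize, 0.0f);
    for (unsigned int b = 0; b < batchSize; ++b)
    {
        for (unsigned int o = 0; o < outputSize; ++o)
        {
            float acc = 0.0f;
            for (unsigned int i = 0; i < inputSize; ++i)
            {
                // Non-transposed weights are [inputSize, outputSize];
                // transposed weights are [outputSize, inputSize].
                const float w = transposeWeights ? weights[o * inputSize + i]
                                                 : weights[i * outputSize + o];
                acc += input[b * inputSize + i] * w;
            }
            output[b * outputSize + o] = biasEnabled ? acc + bias[o] : acc;
        }
    }
    return output;
}

} // anonymous namespace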
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;
    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);
    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        { /* ... */ },  // input values not preserved in this excerpt
        inputTensorInfo));
    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true);
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}
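// The bias quantization above follows the usual convention for quantized fully
// connected layers: biasScale = inputScale * weightsScale with a zero offset, so the
// int32 bias adds directly onto the int32 accumulator of input*weight products.
// A minimal sketch of that relationship (illustrative names, not ArmNN API; assumes
// <cmath> and <cstdint> are included):
namespace
{

int32_t QuantizeBiasValue(float realBias, float inputScale, float weightsScale)
{
    // realBias == quantizedBias * (inputScale * weightsScale), with offset 0.
    return static_cast<int32_t>(std::round(realBias / (inputScale * weightsScale)));
}

} // anonymous namespace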
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;
    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };
    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        weightsDesc.SetQuantizationScale(qScale);
        weightsDesc.SetQuantizationOffset(qOffset);
        biasesDesc.SetQuantizationScale(qScale * qScale);
        biasesDesc.SetQuantizationOffset(0);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset));
    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset));
    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}
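// The powers-of-ten input makes the expected value easy to check by hand:
// 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 = 65432, plus the 900000 bias, gives 965432.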
// Explicit template instantiations for the quantized data types under test.

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;
    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };
    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        }));
    // Non-transposed layout is [inputChannels, outputChannels]; the values are the
    // column-wise arrangement of the transposed matrix given below.
    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }
    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        }));

    return result;
}
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
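// A typical driver supplies a backend workload factory and memory manager. A sketch
// of such a call (illustrative only: RefWorkloadFactory construction and how the
// test framework compares result.output against result.outputExpected vary by
// ArmNN version):
//
//   armnn::RefWorkloadFactory workloadFactory;
//   armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager = nullptr;
//   auto result = FullyConnectedFloat32Test(workloadFactory, memoryManager,
//                                           /*biasEnabled=*/true, /*transposeWeights=*/true);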