// NOTE(review): Doxygen-elided snippet — the leading numerals (23, 32, ...)
// are the original file's line numbers and many intermediate lines are
// missing; this fragment is not compilable source on its own.
//
// Fragment of SimpleFullyConnectedTestImpl<T, B>: drives a FullyConnected
// workload end-to-end — creates backend tensor handles, wires them into the
// queue descriptor, sets the layer parameters, allocates memory and executes.
23 template<
typename T,
typename B>
// Visible parameters: weight and input payloads plus the transpose flag
// (remaining parameters elided — see the full signature in the summary below).
32 std::vector<T>& weights,
34 std::vector<T>& input,
36 bool transposeWeights,
// One backend tensor handle per tensor: input, weights, output.
39 std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
40 std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.
CreateTensorHandle(weightsTensorInfo);
41 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Register the handles with the workload's queue descriptor / WorkloadInfo.
53 AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
54 AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
55 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Constant-tensor members on the descriptor — presumably the ScopedTensorHandle
// setup for weightsTensor/biasTensor is in the elided lines; verify against full file.
58 data.m_Weight = &weightsTensor;
59 data.m_Bias = &biasTensor;
// Layer parameters come straight from the test arguments.
61 data.m_Parameters.m_BiasEnabled = biasEnabled;
62 data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
63 data.m_Parameters.m_ConstantWeights = constantWeights;
// Bias is an optional third input: the handle stays null unless the elided
// surrounding branch (presumably `if (biasEnabled)`) creates and registers it.
65 std::unique_ptr<armnn::ITensorHandle> input2Handle =
nullptr;
69 AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
// Allocate backing memory for every handle before execution.
75 input0Handle->Allocate();
76 input1Handle->Allocate();
77 outputHandle->Allocate();
82 input2Handle->Allocate();
// Run the created workload synchronously on the test backend.
86 ExecuteWorkload(*workload, memoryManager);
// NOTE(review): Doxygen-elided snippet — leading numerals are original line
// numbers; intermediate lines are missing.
//
// Fragment of FullyConnectedTest<ArmnnType, T>: quantized fully-connected
// test over a 1x1x2x3 input producing 2 output channels.
94 template<armnn::DataType ArmnnType,
typename T>
100 bool constantWeights)
// Fixed problem dimensions: 3x2x1 input (6 elements), 2 output channels.
102 constexpr
static unsigned int inputWidth = 3u;
103 constexpr
static unsigned int inputHeight = 2u;
104 constexpr
static unsigned int inputChannels = 1u;
106 constexpr
static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
108 constexpr
static unsigned int outputChannels = 2u;
// NCHW tensor info for the input, in the requested ArmnnType.
110 armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
// Quantization parameters; the output zero-point differs depending on
// whether bias is enabled.
112 inputTensorInfo.SetQuantizationOffset(63);
116 outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
120 weightsDesc.SetQuantizationOffset(93);
// Bias scale is input scale * weights scale, with zero offset.
123 biasesDesc.
SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
124 biasesDesc.SetQuantizationOffset(0);
// Test payloads quantized/converted to the target data type
// (the input initializer values are elided in this snippet).
128 std::vector<T> input = ConvertToDataType<ArmnnType>(
135 std::vector<T> weights = ConvertToDataType<ArmnnType>(
137 -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
138 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
// Int32 bias values, one per output channel.
142 std::vector<int32_t> bias = {9250, 67500};
// Delegate workload creation/execution to the shared implementation.
144 result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
// Two expected-data assignments — presumably one branch per biasEnabled
// value (the surrounding if/else is elided; confirm against the full file).
160 result.
m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
164 result.
m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
// NOTE(review): Doxygen-elided snippet — leading numerals are original line
// numbers; intermediate lines are missing.
//
// Fragment of FullyConnectedLargeTestCommon<ArmnnType, T>: single-batch,
// 5-channel input against a 5x1 weight matrix (optionally transposed), with
// large magnitudes to exercise the accumulator range.
176 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
181 bool transposeWeights,
185 unsigned int inputWidth = 1;
186 unsigned int inputHeight = 1;
187 unsigned int inputChannels = 5;
188 unsigned int inputNum = 1;
190 unsigned int outputChannels = 1;
191 unsigned int outputNum = 1;
// NCHW input shape; weights are laid out [inputChannels, outputChannels]
// and the dimensions are swapped when the transposed layout is requested.
199 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
200 unsigned int outputShape[] = { outputNum, outputChannels };
201 unsigned int weightsShape[] = { inputChannels, outputChannels };
202 if (transposeWeights)
204 std::swap(weightsShape[0], weightsShape[1]);
207 unsigned int biasShape[] = { outputChannels };
// Quantization setup applied only for quantized T (branch body elided).
215 if(armnn::IsQuantizedType<T>())
// Powers-of-ten input makes each weight's contribution visible in the sum.
225 std::vector<T> input = armnnUtils::QuantizedVector<T>(
227 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
231 std::vector<T> weights = armnnUtils::QuantizedVector<T>(
233 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
237 std::vector<T> biasValues({900000.f});
// Trailing bools: biasEnabled = true, transposeWeights as given,
// constantWeights = true.
239 result = SimpleFullyConnectedTestImpl<T>(
243 inputTensorInfo, outputTensorInfo,
244 weightsDesc, biasesDesc,
245 weights, biasValues, input,
246 true, transposeWeights, true
// 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.
249 result.
m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
// Fragments of the per-datatype wrapper tests: each forwards to the
// templated FullyConnectedTest with a concrete quantized data type
// (enclosing function signatures elided in this snippet).
259 FullyConnectedTest<armnn::DataType::QAsymmU8>(
267 FullyConnectedTest<armnn::DataType::QSymmS16>(
// NOTE(review): Doxygen-elided snippet — leading numerals are original line
// numbers; intermediate lines are missing.
//
// Fragment of FullyConnectedFloat32Test: batch of 2, 5 input channels,
// 3 output channels, Float32, with optional bias and weight transposition.
283 bool transposeWeights)
285 unsigned int inputWidth = 1;
286 unsigned int inputHeight = 1;
287 unsigned int inputChannels = 5;
288 unsigned int inputNum = 2;
290 unsigned int outputChannels = 3;
291 unsigned int outputNum = 2;
// NCHW input, [batch, channels] output; weights default to
// [inputChannels, outputChannels] and are swapped when transposed.
299 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
300 unsigned int outputShape[] = { outputNum, outputChannels };
301 unsigned int weightsShape[] = { inputChannels, outputChannels };
303 if (transposeWeights)
305 std::swap(weightsShape[0], weightsShape[1]);
308 unsigned int biasShape[] = { outputChannels };
// Two input rows: ascending 1..5 then descending 5..1.
317 std::vector<float> input =
319 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
320 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
// Weight payload differs per layout; only the transposed initializer is
// visible here (the non-transposed values are in the elided lines).
323 std::vector<float> weights =
332 if (transposeWeights)
336 .5f, .5f, .5f, .5f, .5f,
337 2.f, 2.f, 2.f, 2.f, 2.f,
338 .5f, 1.f, 2.f, 3.f, 4.f
// Zero bias by default; replaced with non-zero values when bias is enabled
// (the enclosing `if (biasEnabled)` is elided).
342 std::vector<float> biasValues({0.f, 0.f, 0.f});
345 biasValues = std::vector<float>({10.f, 20.f, 30.f});
// Delegate to the shared implementation with constantWeights = true.
348 result = SimpleFullyConnectedTestImpl<float>(
352 inputTensorInfo, outputTensorInfo,
353 weightsDesc, biasesDesc,
354 weights, biasValues, input,
355 biasEnabled, transposeWeights, true
// Expected output spelled out as explicit dot products plus bias,
// one line per (batch, output-channel) pair.
358 std::vector<float> expectedOutput =
360 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
361 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
362 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
364 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
365 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
366 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
// Fragment of FullyConnectedLargeTest: thin wrapper forwarding to the
// Float32 instantiation of FullyConnectedLargeTestCommon
// (remaining forwarded arguments elided in this snippet).
377 bool transposeWeights)
379 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
LayerTestResult< float, 2 > FullyConnectedFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
void swap(OriginsDescriptor &first, OriginsDescriptor &second)
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo, std::vector< T > &weights, std::vector< B > &bias, std::vector< T > &input, bool biasEnabled, bool transposeWeights, bool constantWeights)
LayerTestResult< T, 2 > FullyConnectedTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< T > m_ExpectedData
void SetQuantizationScale(float scale)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< T > m_ActualData
LayerTestResult< float, 2 > FullyConnectedLargeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
virtual std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)