template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    // Upload the constant weights and bias into their backing tensor handles.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);

    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
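// For reference: the fully connected layer exercised above computes, per batch element,
// output[n][o] = sum_i(input[n][i] * weights[o][i]) (+ bias[o] when biasEnabled), with the
// weight matrix read as [outputChannels][inputSize] when the transpose flag is set.
// A minimal illustrative sketch of that computation follows; it is not part of the test
// harness, and the function name and flattened layouts are our assumptions:
inline std::vector<float> ReferenceFullyConnected(const std::vector<float>& input,   // batch x inputSize, flattened
                                                  const std::vector<float>& weights, // outputChannels x inputSize, flattened
                                                  const std::vector<float>& bias,    // outputChannels (empty = no bias)
                                                  unsigned int batch,
                                                  unsigned int inputSize,
                                                  unsigned int outputChannels)
{
    std::vector<float> output(batch * outputChannels, 0.0f);
    for (unsigned int n = 0; n < batch; ++n)
    {
        for (unsigned int o = 0; o < outputChannels; ++o)
        {
            // Start from the per-output-channel bias, then accumulate the dot product.
            float acc = bias.empty() ? 0.0f : bias[o];
            for (unsigned int i = 0; i < inputSize; ++i)
            {
                acc += input[n * inputSize + i] * weights[o * inputSize + i];
            }
            output[n * outputChannels + o] = acc;
        }
    }
    return output;
}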
template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;
    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);
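    // Why the bias scale is inputScale * weightScale: every product accumulated by the
    // workload has the form (inputScale * (qIn - inOffset)) * (weightScale * (qW - wOffset)),
    // so a bias added into the same int32 accumulator must be quantized with the product of
    // the two scales (0.1f * 0.2f = 0.02f here) and a zero offset. The quantized bias values
    // below therefore represent 9250 * 0.02f = 185.0f and 67500 * 0.02f = 1350.0f.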
    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true); // transposeWeights = true

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}
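// Note on the comparison: for quantized instantiations, ConvertToDataType quantizes the
// real-valued expectations with the output TensorInfo's scale and offset, so result.output
// and result.outputExpected end up holding integers that are compared exactly rather than
// with a float tolerance. A minimal sketch of that affine quantization step (the helper
// name is ours for illustration; ArmNN ships its own quantize utilities; requires <cmath>):
inline int32_t QuantizeToOffset(float value, float scale, int32_t offset)
{
    // q = round(value / scale) + offset, e.g. 80.0f with scale 5.0f and offset -50 -> -34.
    return static_cast<int32_t>(std::lround(value / scale)) + offset;
}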
template<armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;
    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };
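    // When m_TransposeWeightMatrix is set, the workload reads the weight matrix as
    // [outputChannels, inputChannels] instead of [inputChannels, outputChannels]; swapping
    // the two dimensions above keeps the element count and data unchanged while exercising
    // that code path.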
    armnn::TensorInfo inputTensorInfo(4, inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(2, outputShape, ArmnnType);
    armnn::TensorInfo weightsDesc(2, weightsShape, ArmnnType);
    armnn::TensorInfo biasesDesc(1, biasShape, ArmnnType);

    // Apply the quantization parameters when a quantized type was requested.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset));
    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset));
    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights); // biasEnabled = true

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}
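// The expected value above is easy to verify by hand: the inputs are successive powers of
// ten, so the dot product is 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 = 65432, and adding the
// 900000 bias gives 965432. Every intermediate sum is an integer below 2^24, so the float
// arithmetic is exact.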
//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);
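// The explicit instantiations exist because the function template is defined in this .cpp
// file; backend test suites link against the pre-instantiated symbols. A hypothetical call
// site, for illustration only (the factory type and the null memory manager are our
// assumptions, not taken from this file):
//
//   armnn::RefWorkloadFactory factory;
//   LayerTestResult<uint8_t, 2> res =
//       FullyConnectedTest<armnn::DataType::QAsymmU8>(factory, nullptr, /*biasEnabled=*/true);
//   // The suite then compares res.output against res.outputExpected.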
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;
    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };
    armnn::TensorInfo inputTensorInfo(4, inputShape, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(2, outputShape, armnn::DataType::Float32);
    armnn::TensorInfo weightsDesc(2, weightsShape, armnn::DataType::Float32);
    armnn::TensorInfo biasesDesc(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        }));
    // Non-transposed layout: [inputChannels, outputChannels], i.e. the transpose of the
    // matrix used in the transposeWeights branch below.
    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }
    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights);
    // Each expected entry is the dot product of one input row with one weight column,
    // plus the matching bias value.
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        }));

    return result;
}
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
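// In the per-backend unit tests these entry points are typically registered via the
// ARMNN_AUTO_TEST_CASE macro; a sketch, with test names assumed for illustration:
//
//   ARMNN_AUTO_TEST_CASE(FullyConnectedLarge,           FullyConnectedLargeTest, false)
//   ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)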