#include <doctest/doctest.h>

// CreateFullyConnectedNetworkNonConstWeights: the weights arrive through a
// second input layer, so slot 1 of the FullyConnected layer is fed at runtime.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
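// A minimal sketch (not verbatim from this file) of the builder these Connect
// calls belong to. It assumes the usual INetwork construction API from
// armnn/INetwork.hpp; the helper name and the layer names are illustrative.
inline armnn::INetworkPtr SketchNonConstWeightsFullyConnectedNetwork(
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const armnn::TensorInfo& weightsTensorInfo,
    const armnn::FullyConnectedDescriptor& descriptor)
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    // Two graph inputs: activations on binding id 0, weights on binding id 1.
    armnn::IConnectableLayer* inputLayer          = network->AddInputLayer(0, "Input");
    armnn::IConnectableLayer* weightsInputLayer   = network->AddInputLayer(1, "Weights_Input");
    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor,
                                                                                    "Fully_Connected");
    armnn::IConnectableLayer* outputLayer         = network->AddOutputLayer(0, "Output");

    // The Connect helper wires an output slot to an input slot and sets the
    // TensorInfo on the source slot.
    Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
    Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
    Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);

    return network;
}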
// CreateFullyConnectedNetworkNonConstWeightsConstBias: weights are still a
// runtime input on slot 1, while a bias layer feeds slot 2.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
// CreateFullyConnectedNetworkConstWeightsNonConstBias: the weights now come
// from weightsLayer (a constant layer) while the bias is supplied at runtime.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(weightsLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
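// In the constant-weights variants, weightsLayer/biasLayer would come from
// AddConstantLayer rather than AddInputLayer; a brief sketch (variable names
// assumed, not taken from this fragment):
//
//   armnn::ConstTensor weightsConstantTensor(weightsTensorInfo, weightsData.data());
//   armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weightsConstantTensor, "Weights");
//   weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);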
// Error-case builders: each one deliberately leaves some FullyConnected input
// slot unconnected (or without a TensorInfo) so validation can be exercised.

// Here only the activation input and the output are wired up; the weights
// slot (1) gets no Connect call.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
// Bias (slot 2) is connected while the weights slot (1) is left open.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
// Again only the input and output are wired through the Connect helper.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
// Weights (slot 1) are connected, but the bias slot (2) is left open.
Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
Connect(weightsLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
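// All of the builders above follow the same FullyConnected input-slot
// convention: slot 0 takes the activations, slot 1 the weights, and slot 2
// the optional bias (only available when m_BiasEnabled is set on the
// FullyConnectedDescriptor).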
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    using namespace armnn;
    std::vector<T> inputData {
        /* ... input values elided in this fragment ... */
    };
    std::vector<T> weightsData {
        -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
        23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
    };
    std::vector<float> floatExpectedOutputData {
        /* ... expected values elided in this fragment ... */
    };

    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData);
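// armnnUtils::QuantizedVector quantizes a float vector into the target
// element type; with the default scale/offset it is effectively a cast.
// A sketch of an explicit call (the scale and offset values here are
// illustrative, not from this test):
//
//   std::vector<T> quantized = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, 0.5f, 10);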
    // Argument list completed from the builder's Connect block above (order assumed).
    armnn::INetworkPtr network = CreateFullyConnectedNetworkNonConstWeights(inputTensorInfo,
                                                                            outputTensorInfo,
                                                                            weightsTensorInfo,
                                                                            descriptor);
    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, weightsData }};
    std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                inputTensorData,
                                                expectedOutputTensorData,
                                                backends);
}
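// Typical invocation from a backend's end-to-end test suite; the doctest
// wrapper and the CpuRef backend id are illustrative, not from this file:
//
//   TEST_CASE("FullyConnectedWithDynamicWeightsEndToEndFloat32")
//   {
//       FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>({armnn::Compute::CpuRef});
//   }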
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::BackendId>& backends,
                                                       const bool transposeWeights,
                                                       const bool constantWeightsOrBias)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    // With transposed weights the shape flips to { outputChannels, inputChannels }.
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };
    std::vector<float> input =
    {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
        5.0f, 4.0f, 3.0f, 2.0f, 1.0f
    };
    // Default weights in { inputChannels, outputChannels } = 5 x 3 layout;
    // the values are reconstructed here as the transpose of the 3 x 5 matrix
    // assigned in the branch below.
    std::vector<float> weights =
    {
        .5f, 2.f, .5f,
        .5f, 2.f, 1.f,
        .5f, 2.f, 2.f,
        .5f, 2.f, 3.f,
        .5f, 2.f, 4.f
    };

    if (transposeWeights)
    {
        weights =
        {
            .5f, .5f, .5f, .5f, .5f,
            2.f, 2.f, 2.f, 2.f, 2.f,
            .5f, 1.f, 2.f, 3.f, 4.f
        };
    }
    std::vector<float> biasValues = { 10.f, 20.f, 30.f };
    std::vector<float> expectedOutput =
    {
        0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
        2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
        0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

        2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
        10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
        2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
    };
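// Each expected value is one dot product written out in full. For example the
// first entry is input row (1, 2, 3, 4, 5) against weight column
// (.5, .5, .5, .5, .5): 0.5 + 1.0 + 1.5 + 2.0 + 2.5 = 7.5, plus
// biasValues[0] = 10.f, giving 17.5f.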
    if (!constantWeightsOrBias)
    {
        // Non-constant weights, constant bias.
        ConstTensor biasConstantTensor(biasesDesc, biasValues.data());

        armnn::INetworkPtr network = CreateFullyConnectedNetworkNonConstWeightsConstBias(
            inputTensorInfo /* , ... remaining arguments elided in this fragment ... */);

        std::map<int, std::vector<T>> inputTensorData = {{ 0, input }, { 1, weights }};
        std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};

        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                    inputTensorData,
                                                    expectedOutputTensorData,
                                                    backends);
    }
    else
    {
        // Constant weights, non-constant bias.
        ConstTensor weightsConstantTensor(weightsDesc, weights.data());

        armnn::INetworkPtr network = CreateFullyConnectedNetworkConstWeightsNonConstBias(
            inputTensorInfo,
            /* ... arguments elided in this fragment ... */
            weightsConstantTensor
            /* , ... arguments elided in this fragment ... */);

        // The bias is a graph input here, bound to id 2.
        std::map<int, std::vector<T>> inputTensorData = {{ 0, input }, { 2, biasValues }};
        std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};

        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                    inputTensorData,
                                                    expectedOutputTensorData,
                                                    backends);
    }
}
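// Typical invocations covering both branches (backend id is illustrative):
//
//   FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(
//       {armnn::Compute::CpuRef}, /*transposeWeights=*/true, /*constantWeightsOrBias=*/false);
//   FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(
//       {armnn::Compute::CpuRef}, /*transposeWeights=*/true, /*constantWeightsOrBias=*/true);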
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
                                 const bool explicitCheck,
                                 const bool biasEnabled,
                                 const bool connectedWeights,
                                 const bool connectedBias,
                                 const bool tensorInfoSet)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    unsigned int biasShape[] = { outputChannels };
    std::vector<float> weights =
    {
        /* ... weight values elided in this fragment ... */
    };
    // First failure mode: the builder tries to connect a bias to a
    // FullyConnected layer whose descriptor has bias disabled. The enclosing
    // branch condition is elided in this fragment.
    try
    {
        CreateFullyConnectedNetworkNoConnectedWeightsExplicit(
            inputTensorInfo /* , ... remaining arguments elided in this fragment ... */);
        FAIL("LayerValidationException should have been thrown");
    }
    catch (const armnn::LayerValidationException& exc)
    {
        CHECK(strcmp(exc.what(),
                     "Tried to connect bias to FullyConnected layer when bias is not enabled: "
                     "Failed to connect to input slot 2 on FullyConnected layer "
                     "\"Fully_Connected\" as the slot does not exist or is unavailable") == 0);
    }
    else if (!connectedWeights)
    {
        armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedWeightsExplicit(
            inputTensorInfo /* , ... remaining arguments elided in this fragment ... */);
        /* ... optimisation and exception checks elided ... */
    }
    else if (!connectedBias)
    {
        ConstTensor weightsConstantTensor(weightsDesc, weights.data());

        armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedBiasExplicit(
            inputTensorInfo,
            /* ... arguments elided in this fragment ... */
            weightsConstantTensor
            /* , ... arguments elided in this fragment ... */);
        /* ... optimisation and exception checks elided ... */
    }
    else if (!connectedWeights && !connectedBias)
    {
        // Note: as ordered, this branch is unreachable, since the
        // (!connectedWeights) branch above already captures every case where
        // the weights are unconnected.
        armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedWeightsAndBias(
            inputTensorInfo /* , ... remaining arguments elided in this fragment ... */);
        /* ... optimisation and exception checks elided ... */
    }
    else if (!tensorInfoSet)
    {
        ConstTensor weightsConstantTensor(weightsDesc, weights.data());

        armnn::INetworkPtr network = CreateFullyConnectedNetworkNoTensorInfoConstWeights(
            inputTensorInfo,
            /* ... arguments elided in this fragment ... */
            weightsConstantTensor
            /* , ... arguments elided in this fragment ... */);

        try
        {
            Optimize(*network, backends, runtime->GetDeviceSpec());
            FAIL("LayerValidationException should have been thrown");
        }
        catch (const armnn::LayerValidationException& exc)
        {
            CHECK(strcmp(exc.what(),
                         "Output slot TensorInfo not set on Constant layer \"Weights\"") == 0);
        }
    }
}