ArmNN 21.05: FullyConnectedEndToEndTestImpl.hpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "CommonTestUtils.hpp"

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>

#include <QuantizeHelper.hpp>    // assumed include for armnnUtils::QuantizedVector used below

#include <boost/test/unit_test.hpp>

#include <vector>

namespace
{

armnn::INetworkPtr CreateFullyConnectedNetworkNonConstWeights(const armnn::TensorInfo& inputTensorInfo,
                                                              const armnn::TensorInfo& outputTensorInfo,
                                                              const armnn::TensorInfo& weightsTensorInfo,
                                                              armnn::FullyConnectedDescriptor descriptor)
{
    armnn::INetworkPtr network(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer          = network->AddInputLayer(0, "Input");
    armnn::IConnectableLayer* weightsInputLayer   = network->AddInputLayer(1, "Weights_Input");
    // Weights arrive through the second input layer, so no constant weights or biases are attached here.
    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor,
                                                                                    armnn::EmptyOptional(), // weights
                                                                                    armnn::EmptyOptional(), // biases
                                                                                    "Fully_Connected");
    armnn::IConnectableLayer* outputLayer         = network->AddOutputLayer(0, "Output");

    Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
    Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
    Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);

    return network;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    using namespace armnn;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, 2 }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(10);

    armnn::TensorInfo weightsTensorInfo({ 2, 6 }, ArmnnType);
    weightsTensorInfo.SetQuantizationScale(0.2f);
    weightsTensorInfo.SetQuantizationOffset(93);

    FullyConnectedDescriptor descriptor;
    descriptor.m_ConstantWeights = false;
    descriptor.m_BiasEnabled = false;
    descriptor.m_TransposeWeightMatrix = true;

    std::vector<T> inputData {
        -1.2f, 6.1f, -3.5f,
        18.8f, -5.5f, 2.9f
    };

    std::vector<T> weightsData {
        -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
        23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
    };

    std::vector<T> floatExpectedOutputData {
        -107.04f, 110.f
    };
    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData);

    armnn::INetworkPtr network = CreateFullyConnectedNetworkNonConstWeights(inputTensorInfo,
                                                                            outputTensorInfo,
                                                                            weightsTensorInfo,
                                                                            descriptor);

    BOOST_TEST_CHECKPOINT("create a network");

    // Input 0 carries the activation data, input 1 the (non-constant) weights.
    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, weightsData }};
    std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                inputTensorData,
                                                expectedOutputTensorData,
                                                backends,
                                                1.0f); // tolerance when comparing against the expected output
}
} // anonymous namespace
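
The template above is not a test case by itself; a backend's end-to-end test suite instantiates it with that backend's id. Below is a minimal sketch of such a caller, assuming Boost.Test (as used by this header), the reference backend, and the Float32 data type; the suite name, test name, and include path are illustrative, not taken from this file.

// Illustrative caller, e.g. in a backend test file such as RefEndToEndTests.cpp
#include <backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(RefEndToEnd)

BOOST_AUTO_TEST_CASE(RefFullyConnectedWithDynamicWeightsEndToEndTest)
{
    // Run the shared end-to-end test on the CPU reference backend with float data.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(backends);
}

BOOST_AUTO_TEST_SUITE_END()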