author    Cathal Corbett <cathal.corbett@arm.com>    2021-11-10 12:14:39 +0000
committer Cathal Corbett <cathal.corbett@arm.com>    2021-11-12 09:53:59 +0000
commit    9f184c4cce711891a194ff05f68af6082aa920e9 (patch)
tree      65d1edbd464b36d972bc5f8a2e52775b7b96d127
parent    608c8366c0979a14607ec77d06771d6c994332b2 (diff)
download  armnn-9f184c4cce711891a194ff05f68af6082aa920e9.tar.gz
IVGCVSW-6347 Fix PyArmnn AddConstantLayer
* Added new test for AddConstantLayer to FullyConnectedLayer with actual data in test_network.py
* Added support for m_ConstantWeights in FullyConnectedDescriptor in file armnn_descriptors.i
* Amended AddFullyConnectedLayer methods in armnn_network.i to add support for ConstTensorsAsInputs to FullyConnected.
* Fixed indentation in test_network.py

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Id25c7ef6ed2cd3de3285416d90588ccb345aa9bc
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i    61
-rw-r--r--  python/pyarmnn/test/test_network.py                        130
2 files changed, 147 insertions, 44 deletions
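For context, a minimal sketch of how the amended API is intended to be used, condensed from the new test added below. Shapes and values here are illustrative only; the calls (AddConstantLayer, AddFullyConnectedLayer with a descriptor-only signature, slot wiring) are taken from this change, so treat this as a sketch rather than reference documentation.

    # Minimal sketch, assuming PyArmnn built with this change; shapes are illustrative.
    # With ConstTensorsAsInputs, weights (and optional bias) are supplied to the
    # FullyConnected layer through ConstantLayers connected to its input slots,
    # instead of being passed as arguments to AddFullyConnectedLayer.
    import numpy as np
    import pyarmnn as ann

    network = ann.INetwork()

    # Constant layer holding the weights: (inputChannels, outputChannels).
    weights = np.ones((5, 3), dtype=np.float32)
    w_info = ann.TensorInfo(ann.TensorShape((5, 3)), ann.DataType_Float32, 0, 0, True)
    w_layer = network.AddConstantLayer(ann.ConstTensor(w_info, weights), "weights")

    fc_descriptor = ann.FullyConnectedDescriptor()
    fc_descriptor.m_ConstantWeights = True  # field exposed via armnn_descriptors.i in this change

    # New overload: only the descriptor and an optional name, no weights/bias arguments.
    fully_connected = network.AddFullyConnectedLayer(fc_descriptor, "fc")

    # Weights arrive through input slot 1 (slot 0 is the data input, slot 2 the optional bias).
    w_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(1))
    w_layer.GetOutputSlot(0).SetTensorInfo(w_info)

The full end-to-end version, including bias, optimization, and execution on CpuRef, is the test_add_constant_layer_to_fully_connected test in the diff below.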
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index d50b841f4a..b114edd7c4 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -597,7 +597,7 @@ public:
armnn::IConnectableLayer* AddInstanceNormalizationLayer(const armnn::InstanceNormalizationDescriptor& desc,
const char* name = nullptr);
- %feature("docstring",
+ %feature("docstring",
"
Adds a Log Softmax layer to the network.
@@ -611,7 +611,7 @@ public:
armnn::IConnectableLayer* AddLogSoftmaxLayer(const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
const char* name = nullptr);
- %feature("docstring",
+ %feature("docstring",
"
Adds an L2 Normalization layer to the network.
Normalization is performed along dimension 1, but requires a 4d input.
@@ -642,7 +642,7 @@ public:
const armnn::LstmInputParams& params,
const char* name = nullptr);
- %feature("docstring",
+ %feature("docstring",
"
Add a Maximum layer to the network.
@@ -979,6 +979,20 @@ public:
") AddSwitchLayer;
armnn::IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
+ %feature("docstring",
+ "
+ Adds a Fully Connected layer to the network. Also known as a Linear or Dense layer.
+
+ Args:
+ fullyConnectedDescriptor (FullyConnectedDescriptor): Description of the fully connected layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddFullyConnectedLayer;
+ armnn::IConnectableLayer* AddFullyConnectedLayer(const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const char* name = nullptr);
+
};
%extend INetwork {
@@ -992,17 +1006,18 @@ public:
}
%feature("docstring",
- "
- Adds a Fully Connected layer to the network. Also known as a Linear or Dense layer.
+ "
+ Adds a Fully Connected layer to the network with input weights and optional bias.
+ Also known as a Linear or Dense layer.
- Args:
- fullyConnectedDescriptor (FullyConnectedDescriptor): Description of the fully connected layer.
- weights (ConstTensor): Tensor for the weights data.
- biases (ConstTensor): Optional tensor for the bias data.
- name (str): Optional name for the layer.
+ Args:
+ fullyConnectedDescriptor (FullyConnectedDescriptor): Description of the fully connected layer.
+ weights (ConstTensor): Tensor for the weights data.
+ biases (ConstTensor): Optional tensor for the bias data.
+ name (str): Optional name for the layer.
- Returns:
- IConnectableLayer: Interface for configuring the layer.
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
") AddFullyConnectedLayer;
armnn::IConnectableLayer* AddFullyConnectedLayer(const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
const armnn::ConstTensor& weights,
@@ -1020,22 +1035,22 @@ public:
}
%feature("docstring",
- "
- Adds a 2D Transpose Convolution layer to the network.
+ "
+ Adds a 2D Transpose Convolution layer to the network.
- Args:
- descriptor (TransposeConvolution2dDescriptor): Descriptor containing all parameters to configure this layer.
- weights (ConstTensor): Tensor for the weights data.
- biases (ConstTensor): Optional tensor for the bias data.
- name (str): Optional name for the layer.
+ Args:
+ descriptor (TransposeConvolution2dDescriptor): Descriptor containing all parameters to configure this layer.
+ weights (ConstTensor): Tensor for the weights data.
+ biases (ConstTensor): Optional tensor for the bias data.
+ name (str): Optional name for the layer.
- Returns:
- IConnectableLayer: Interface for configuring the layer.
- ") AddTransposeConvolution2dLayer;
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddTransposeConvolution2dLayer;
armnn::IConnectableLayer* AddTransposeConvolution2dLayer(const armnn::TransposeConvolution2dDescriptor& descriptor,
const armnn::ConstTensor& weights,
armnn::ConstTensor* biases = nullptr,
- const char* name = nullptr){
+ const char* name = nullptr) {
if (biases) {
return $self->AddTransposeConvolution2dLayer(descriptor, weights,
diff --git a/python/pyarmnn/test/test_network.py b/python/pyarmnn/test/test_network.py
index 9b2dbf7f3a..4f37c473ac 100644
--- a/python/pyarmnn/test/test_network.py
+++ b/python/pyarmnn/test/test_network.py
@@ -2,6 +2,7 @@
# SPDX-License-Identifier: MIT
import os
import stat
+import numpy as np
import pytest
import pyarmnn as ann
@@ -244,27 +245,27 @@ def test_network_method_exists(method):
def test_fullyconnected_layer_optional_none():
net = ann.INetwork()
- layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
- weights=ann.ConstTensor())
+ layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
+ ann.ConstTensor())
assert layer
def test_fullyconnected_layer_optional_provided():
net = ann.INetwork()
- layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor())
+ layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
+ ann.ConstTensor(),
+ ann.ConstTensor())
assert layer
def test_fullyconnected_layer_all_args():
net = ann.INetwork()
- layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor(),
- name='NAME1')
+ layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
+ ann.ConstTensor(),
+ ann.ConstTensor(),
+ 'NAME1')
assert layer
assert 'NAME1' == layer.GetName()
@@ -273,7 +274,7 @@ def test_fullyconnected_layer_all_args():
def test_DepthwiseConvolution2d_layer_optional_none():
net = ann.INetwork()
layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
- weights=ann.ConstTensor())
+ weights=ann.ConstTensor())
assert layer
@@ -281,8 +282,8 @@ def test_DepthwiseConvolution2d_layer_optional_none():
def test_DepthwiseConvolution2d_layer_optional_provided():
net = ann.INetwork()
layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor())
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor())
assert layer
@@ -290,9 +291,9 @@ def test_DepthwiseConvolution2d_layer_optional_provided():
def test_DepthwiseConvolution2d_layer_all_args():
net = ann.INetwork()
layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor(),
- name='NAME1')
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor(),
+ name='NAME1')
assert layer
assert 'NAME1' == layer.GetName()
@@ -301,7 +302,7 @@ def test_DepthwiseConvolution2d_layer_all_args():
def test_Convolution2d_layer_optional_none():
net = ann.INetwork()
layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
- weights=ann.ConstTensor())
+ weights=ann.ConstTensor())
assert layer
@@ -309,8 +310,8 @@ def test_Convolution2d_layer_optional_none():
def test_Convolution2d_layer_optional_provided():
net = ann.INetwork()
layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor())
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor())
assert layer
@@ -318,9 +319,96 @@ def test_Convolution2d_layer_optional_provided():
def test_Convolution2d_layer_all_args():
net = ann.INetwork()
layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
- weights=ann.ConstTensor(),
- biases=ann.ConstTensor(),
- name='NAME1')
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor(),
+ name='NAME1')
assert layer
assert 'NAME1' == layer.GetName()
+
+
+def test_add_constant_layer_to_fully_connected():
+
+ inputWidth = 1
+ inputHeight = 1
+ inputChannels = 5
+ inputNum = 2
+
+ outputChannels = 3
+ outputNum = 2
+
+ inputShape = ( inputNum, inputChannels, inputHeight, inputWidth )
+ outputShape = ( outputNum, outputChannels )
+ weightsShape = ( inputChannels, outputChannels )
+ biasShape = ( outputChannels, )
+
+ input = np.array([
+ [1.0, 2.0, 3.0, 4.0, 5.0],
+ [5.0, 4.0, 3.0, 2.0, 1.0]
+ ], dtype=np.float32)
+
+ weights = np.array([
+ [.5, 2., .5],
+ [.5, 2., 1.],
+ [.5, 2., 2.],
+ [.5, 2., 3.],
+ [.5, 2., 4.]
+ ], dtype=np.float32)
+
+ biasValues = np.array([10, 20, 30], dtype=np.float32)
+
+ expectedOutput = np.array([
+ [0.5 + 1.0 + 1.5 + 2.0 + 2.5 + biasValues[0],
+ 2.0 + 4.0 + 6.0 + 8.0 + 10. + biasValues[1],
+ 0.5 + 2.0 + 6.0 + 12. + 20. + biasValues[2]],
+ [2.5 + 2.0 + 1.5 + 1.0 + 0.5 + biasValues[0],
+ 10.0 + 8.0 + 6.0 + 4.0 + 2. + biasValues[1],
+ 2.5 + 4.0 + 6.0 + 6. + 4. + biasValues[2]]
+ ], dtype=np.float32)
+
+ network = ann.INetwork()
+
+ input_info = ann.TensorInfo(ann.TensorShape(inputShape), ann.DataType_Float32, 0, 0, True)
+ input_tensor = ann.ConstTensor(input_info, input)
+ input_layer = network.AddInputLayer(0, "input")
+
+ w_info = ann.TensorInfo(ann.TensorShape(weightsShape), ann.DataType_Float32, 0, 0, True)
+ w_tensor = ann.ConstTensor(w_info, weights)
+ w_layer = network.AddConstantLayer(w_tensor, "weights")
+
+ b_info = ann.TensorInfo(ann.TensorShape(biasShape), ann.DataType_Float32, 0, 0, True)
+ b_tensor = ann.ConstTensor(b_info, biasValues)
+ b_layer = network.AddConstantLayer(b_tensor, "bias")
+
+ fc_descriptor = ann.FullyConnectedDescriptor()
+ fc_descriptor.m_BiasEnabled = True
+ fc_descriptor.m_ConstantWeights = True
+ fully_connected = network.AddFullyConnectedLayer(fc_descriptor, "fc")
+
+ output_info = ann.TensorInfo(ann.TensorShape(outputShape), ann.DataType_Float32)
+ output_tensor = ann.Tensor(output_info, np.zeros([1, 1], dtype=np.float32))
+ output = network.AddOutputLayer(0, "output")
+
+ input_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(0))
+ w_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(1))
+ b_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(2))
+ fully_connected.GetOutputSlot(0).Connect(output.GetInputSlot(0))
+
+ input_layer.GetOutputSlot(0).SetTensorInfo(input_info)
+ w_layer.GetOutputSlot(0).SetTensorInfo(w_info)
+ b_layer.GetOutputSlot(0).SetTensorInfo(b_info)
+ fully_connected.GetOutputSlot(0).SetTensorInfo(output_info)
+
+ preferred_backends = [ann.BackendId('CpuRef')]
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ net_id, messages = runtime.LoadNetwork(opt_network)
+
+ input_tensors = [(0, input_tensor)]
+ output_tensors = [(0, output_tensor)]
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
+
+ assert (output_vectors==expectedOutput).all()