path: root/src/armnn/test/ConstTensorLayerVisitor.cpp
author     Keith Davis <keith.davis@arm.com>    2022-04-07 11:32:00 +0100
committer  Ryan OShea <ryan.oshea3@arm.com>     2022-05-19 11:05:15 +0100
commit     2cddc72f7aa1eab43c69250e608d662909383ba7 (patch)
tree       62c531bb82b96c14469c151c3738e1e0383e5972 /src/armnn/test/ConstTensorLayerVisitor.cpp
parent     85edad42b8b76e76c5d969e4bc380b0e8a845c9b (diff)
download   armnn-2cddc72f7aa1eab43c69250e608d662909383ba7.tar.gz
IVGCVSW-6124 ConstTensorsAsInput: Conv2d - FrontEnd
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Updated Ref.
* Fixed resulting Neon / CL tests.
* Unified optimizers for conv2d ops.
* Optimizer Fix - Fp32ToBf16.
* Partial implementation for ACL backends to fix VTS failures.

!android-nn-driver:7477

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I5fb18877f7ee32643e15a9818945356274bb401b
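In practical terms, this front-end change stops passing weights and biases as arguments to AddConvolution2dLayer and instead supplies them as ConstantLayers wired to the convolution's input slots, which is what the test updates below exercise. A minimal before/after sketch of the call pattern (variable names are illustrative):

    // Before this change: tensors passed directly to the layer
    //   IConnectableLayer* conv = net.AddConvolution2dLayer(descriptor, weights, Optional<ConstTensor>(biases));

    // After this change: weights and biases become ConstantLayers connected to
    // input slot 1 (weights) and input slot 2 (bias) of the convolution layer.
    IConnectableLayer* conv         = net.AddConvolution2dLayer(descriptor);
    IConnectableLayer* weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* biasLayer    = net.AddConstantLayer(biases);
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));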
Diffstat (limited to 'src/armnn/test/ConstTensorLayerVisitor.cpp')
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp  |  48
1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index af0581ce4c..701327b120 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -119,16 +119,22 @@ TEST_CASE("CheckConvolution2dLayer")
descriptor.m_StrideX = 2;
descriptor.m_StrideY = 3;
descriptor.m_DataLayout = DataLayout::NHWC;
+ descriptor.m_BiasEnabled = false;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConvolution2dLayerVisitor visitor(descriptor);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
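The same wiring is available through the public INetwork interface (the test drives the internal NetworkImpl directly). A condensed sketch of the weights-only case, assuming the post-change API; the values mirror the test above:

    #include <armnn/ArmNN.hpp>
    #include <vector>

    using namespace armnn;

    INetworkPtr net = INetwork::Create();

    Convolution2dDescriptor desc;
    desc.m_StrideX     = 2;
    desc.m_StrideY     = 3;
    desc.m_DataLayout  = DataLayout::NHWC;
    desc.m_BiasEnabled = false;   // no bias, so input slot 2 stays unconnected

    // Weight TensorInfo is flagged constant (isConstant = true) so it can back a ConstantLayer
    TensorInfo weightInfo({1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightData = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
    ConstTensor weights(weightInfo, weightData);

    IConnectableLayer* weightsLayer = net->AddConstantLayer(weights);
    IConnectableLayer* conv         = net->AddConvolution2dLayer(desc);

    // Slot 0 carries the activations; slot 1 now carries the weights
    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);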
@@ -148,11 +154,17 @@ TEST_CASE("CheckNamedConvolution2dLayer")
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConvolution2dLayerVisitor visitor(descriptor, layerName);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, layerName);
+
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
@@ -175,13 +187,21 @@ TEST_CASE("CheckConvolution2dLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasVisitor(biases);
+ TestConvolution2dLayerVisitor visitor(descriptor);
NetworkImpl net;
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor);
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ biasLayer->ExecuteStrategy(biasVisitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
@@ -205,13 +225,21 @@ TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasVisitor(biases);
+ TestConvolution2dLayerVisitor visitor(descriptor, layerName);
NetworkImpl net;
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, layerName);
- IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ biasLayer->ExecuteStrategy(biasVisitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
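Putting the pieces together, here is a minimal end-to-end sketch of the bias-enabled pattern against the public INetwork API as it stands after this change; shapes, layer names and values are illustrative rather than taken from the tests:

    #include <armnn/ArmNN.hpp>
    #include <vector>

    int main()
    {
        using namespace armnn;

        INetworkPtr net = INetwork::Create();

        Convolution2dDescriptor desc;
        desc.m_StrideX     = 1;
        desc.m_StrideY     = 1;
        desc.m_DataLayout  = DataLayout::NHWC;
        desc.m_BiasEnabled = true;

        // Constant tensors need TensorInfos flagged as constant (isConstant = true)
        std::vector<float> weightData(1 * 3 * 3 * 1, 1.0f);
        TensorInfo weightInfo({1, 3, 3, 1}, DataType::Float32, 0.0f, 0, true);
        ConstTensor weights(weightInfo, weightData);

        std::vector<float> biasData(1, 0.0f);
        TensorInfo biasInfo({1}, DataType::Float32, 0.0f, 0, true);
        ConstTensor bias(biasInfo, biasData);

        IConnectableLayer* input        = net->AddInputLayer(0);
        IConnectableLayer* weightsLayer = net->AddConstantLayer(weights, "weights");
        IConnectableLayer* biasLayer    = net->AddConstantLayer(bias, "bias");
        IConnectableLayer* conv         = net->AddConvolution2dLayer(desc, "conv2d");
        IConnectableLayer* output       = net->AddOutputLayer(0);

        // Slot 0: activations, slot 1: weights, slot 2: bias
        input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
        weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
        biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
        conv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Tensor infos are set on the producing output slots
        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 5, 5, 1}, DataType::Float32));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
        biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
        conv->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 3, 3, 1}, DataType::Float32));

        return 0;
    }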