author    Keith Davis <keith.davis@arm.com>  2022-04-07 11:32:00 +0100
committer Ryan OShea <ryan.oshea3@arm.com>   2022-05-19 11:05:15 +0100
commit    2cddc72f7aa1eab43c69250e608d662909383ba7 (patch)
tree      62c531bb82b96c14469c151c3738e1e0383e5972 /src/armnn/test/optimizations/FoldPadTests.cpp
parent    85edad42b8b76e76c5d969e4bc380b0e8a845c9b (diff)
IVGCVSW-6124 ConstTensorsAsInput: Conv2d - FrontEnd
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Updated Ref.
* Fixed resulting Neon / CL tests
* Unified optimizers for conv2d ops
* Optimizer Fix - Fp32ToBf16
* Partial implementation for ACL backends to fix VTS failures

!android-nn-driver:7477

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I5fb18877f7ee32643e15a9818945356274bb401b
Diffstat (limited to 'src/armnn/test/optimizations/FoldPadTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp | 27
1 file changed, 18 insertions(+), 9 deletions(-)
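A note on the pattern this diff exercises: with ConstTensorsAsInput, the convolution weights are no longer held only in the layer's m_Weight member; they are supplied by a separate ConstantLayer connected to the convolution's input slot 1. A minimal sketch of that wiring, using the Graph/ConstantLayer API and the variable names already present in this test file (graph, weights, convolution2dDescriptor):

    // Sketch only: weights provided as a ConstantLayer feeding input slot 1,
    // mirroring the additions in the hunks below.
    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());

    Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
    weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));

The CheckSequence expectations in the test change accordingly: the graph now contains an extra ConstantLayer both before and after the FoldPadIntoConvolution2d optimization pass.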
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 9919c6d0e6..027b10377d 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -47,6 +47,12 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
std::vector<float> weightsVector(18);
ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
+ ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
+
+ TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
@@ -56,6 +62,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
// Connect up layers - input -> pad -> conv2d -> output
input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(1));
conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
auto checkSimpleConv2d = [](const Layer* const layer)->bool {
@@ -69,10 +76,11 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
};
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<PadLayer>,
- checkSimpleConv2d,
- &IsLayerOfType<OutputLayer>));
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PadLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ checkSimpleConv2d,
+ &IsLayerOfType<OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
@@ -87,9 +95,10 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
};
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkPadFoldedIntoConv2d,
- &IsLayerOfType<OutputLayer>));
+ &IsLayerOfType<InputLayer>,
+ checkPadFoldedIntoConv2d,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<OutputLayer>));
}
TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
@@ -628,12 +637,12 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
-
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
weights,
optionalBias,
"Conv2D");
-
+ ARMNN_NO_DEPRECATE_WARN_END
TensorInfo outputInfo(4, outputShape, DataType::Float32);
conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
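The final hunk wraps the old weights-and-bias AddConvolution2dLayer overload in ARMNN_NO_DEPRECATE_WARN_BEGIN/END rather than migrating the call. For comparison, a hedged sketch of what the non-deprecated wiring would look like under the ConstTensorsAsInput scheme this commit introduces; it assumes the descriptor-only AddConvolution2dLayer overload and AddConstantLayer from the INetwork API, and reuses the surrounding test's variable names (network, convDescriptor, weights, bias):

    // Sketch only: descriptor-only convolution plus ConstantLayers for the
    // weights and bias, connected to input slots 1 and 2 respectively.
    IConnectableLayer* conv2dLayer  = network->AddConvolution2dLayer(convDescriptor, "Conv2D");
    IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
    IConnectableLayer* biasLayer    = network->AddConstantLayer(bias, "Bias");

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
    biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());

    weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2));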