diff options
author | Keith Davis <keith.davis@arm.com> | 2022-04-07 11:32:00 +0100 |
---|---|---|
committer | Keith Davis <keith.davis@arm.com> | 2022-05-16 16:08:54 +0100 |
commit | b4dd5cc86d4eb841de670f0f102ede599e0d9c40 (patch) | |
tree | 77857cf739baecaf63701b66c1a2646b7930a834 /src/armnn/test/optimizations/FoldPadTests.cpp | |
parent | b86ec6641b4b06ccddad5eebbc21010d6184fe79 (diff) | |
download | armnn-b4dd5cc86d4eb841de670f0f102ede599e0d9c40.tar.gz |
IVGCVSW-6124 ConstTensorsAsInput: Conv2d - FrontEnd
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Updated Ref.
* Fixed resulting Neon / CL tests
* Unified optimizers for conv2d ops
* Optimizer Fix - Fp32ToBf16
* Partial implementation for ACL backends to fix VTS failures
!android-nn-driver:7477
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I5fb18877f7ee32643e15a9818945356274bb401b
Diffstat (limited to 'src/armnn/test/optimizations/FoldPadTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 27 |
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 9919c6d0e6..027b10377d 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -47,6 +47,12 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
     std::vector<float> weightsVector(18);
     ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
 
+    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
+    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
+
+    TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
     Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
     conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
     conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
@@ -56,6 +62,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
     // Connect up layers - input -> pad -> conv2d -> output
     input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
     padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
+    weightsLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(1));
     conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     auto checkSimpleConv2d = [](const Layer* const layer)->bool
@@ -69,10 +76,11 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
     };
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        &IsLayerOfType<PadLayer>,
-                        checkSimpleConv2d,
-                        &IsLayerOfType<OutputLayer>));
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PadLayer>,
+                        &IsLayerOfType<ConstantLayer>,
+                        checkSimpleConv2d,
+                        &IsLayerOfType<OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
 
@@ -87,9 +95,10 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
     };
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        checkPadFoldedIntoConv2d,
-                        &IsLayerOfType<OutputLayer>));
+                        &IsLayerOfType<InputLayer>,
+                        checkPadFoldedIntoConv2d,
+                        &IsLayerOfType<ConstantLayer>,
+                        &IsLayerOfType<OutputLayer>));
 }
 
 TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
@@ -628,12 +637,12 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
     TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
     ConstTensor bias(biasInfo, biasVector);
     Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
-
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
                                                                     weights,
                                                                     optionalBias,
                                                                     "Conv2D");
-
+    ARMNN_NO_DEPRECATE_WARN_END
     TensorInfo outputInfo(4, outputShape, DataType::Float32);
     conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);