ArmNN 20.02
PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../TestUtils.hpp"

#include <Network.hpp>
#include <Optimizer.hpp>

#include <boost/test/unit_test.hpp>

using namespace armnn;

BOOST_AUTO_TEST_SUITE(Optimizer)
using namespace armnn::optimizations;

namespace
{

/// Shared function for the below tests, so that we test the same network in both cases.
INetworkPtr CreateTestNetwork()
{
    // Create a network
    INetworkPtr network = INetwork::Create();

    auto input = network->AddInputLayer(0, "input");
    const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    // Insert Permute which swaps the batch and channel dimensions
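    // With the { 1, 2, 3, 4 } NHWC input, the mapping { 3, 1, 2, 0 } exchanges dimensions 0 and 3,
    // turning the single 4-channel 2x3 image into four single-channel 2x3 images of shape { 4, 2, 3, 1 }.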
    auto permute = network->AddPermuteLayer(PermuteDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
    const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
    permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
    input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));

    // Insert BatchToSpace
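    // With a block shape of { 2, 2 } the four batches are folded back into space:
    // batch 4 / (2 * 2) = 1, height 2 * 2 = 4, width 3 * 2 = 6, giving the { 1, 4, 6, 1 } output below.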
    BatchToSpaceNdDescriptor batchToSpaceDesc;
    batchToSpaceDesc.m_BlockShape = { 2, 2 };
    batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
    auto batchToSpace = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
    const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
    batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
    permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));

    auto output = network->AddOutputLayer(0, "output");
    batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}
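
// The resulting graph is Input -> Permute -> BatchToSpaceNd -> Output; the optimization under test is
// expected to collapse the two middle layers into a single DepthToSpace layer, as checked below.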

/// Shared function for the below tests, so that we test the same network in both cases.
INetworkPtr CreateTransposeTestNetwork()
{
    // Create a network
    INetworkPtr network = INetwork::Create();

    auto input = network->AddInputLayer(0, "input");
    const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    // Insert Transpose which swaps the batch and channel dimensions
    auto permute = network->AddTransposeLayer(TransposeDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
    const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
    permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
    input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));

    // Insert BatchToSpace
    BatchToSpaceNdDescriptor batchToSpaceDesc;
    batchToSpaceDesc.m_BlockShape = { 2, 2 };
    batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
    auto batchToSpace = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
    const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
    batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
    permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));

    auto output = network->AddOutputLayer(0, "output");
    batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}

} // namespace

/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
{
    INetworkPtr network = CreateTestNetwork();
    Graph graph = static_cast<Network*>(network.get())->GetGraph();

    // Confirm initial graph is as we expect
    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
                             &IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));

    // Perform the optimization which should merge the two layers into a DepthToSpace
    armnn::Optimizer::Pass(graph, MakeOptimizations(PermuteAndBatchToSpaceAsDepthToSpace()));

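    // Moving the channel values into the batch dimension and then applying BatchToSpaceNd with a
    // { 2, 2 } block redistributes them into 2x2 spatial blocks, which is exactly what DepthToSpace
    // with a block size of 2 does, so the two layers can be replaced by a single DepthToSpaceLayer.
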
    // Check that the replacement has been made as expected
    auto checkDepthToSpace = [](const Layer* const layer) -> bool {
        return IsLayerOfType<DepthToSpaceLayer>(layer) &&
               static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_BlockSize == 2 &&
               static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_DataLayout == DataLayout::NHWC &&
               layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
    };

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
                             &IsLayerOfType<OutputLayer>));

    // Check the new layer has the two merged layers listed as related layers
    std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
    BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}

/// Tests that the optimization performed by TransposeAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
{
    INetworkPtr network = CreateTransposeTestNetwork();
    Graph graph = static_cast<Network*>(network.get())->GetGraph();

    // Confirm initial graph is as we expect
    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
                             &IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));

    // Perform the optimization which should merge the two layers into a DepthToSpace
    armnn::Optimizer::Pass(graph, MakeOptimizations(TransposeAndBatchToSpaceAsDepthToSpace()));

    // Check that the replacement has been made as expected
    auto checkDepthToSpace = [](const Layer* const layer) -> bool {
        return IsLayerOfType<DepthToSpaceLayer>(layer) &&
               static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_BlockSize == 2 &&
               static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_DataLayout == DataLayout::NHWC &&
               layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
    };

    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
                             &IsLayerOfType<OutputLayer>));

    // Check the new layer has the two merged layers listed as related layers
    std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
    BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}

// These unit tests need the reference backend; they are not available if the reference backend is not built
#if defined(ARMNNREF_ENABLED)

/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
{
    INetworkPtr network = CreateTestNetwork();

    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    // Confirm that the optimization has actually taken place
    const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
    BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                             &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));

    // Load the graph into a runtime so we can check it produces the correct output
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optimizedNetwork));

    std::vector<float> inputData{
        // Each row here is a row of pixels where each pixel has 4 channels
        // clang-format off
        1.0f, 2.0f, 3.0f, 4.0f, 10.0f, 20.0f, 30.0f, 40.0f, 100.0f, 200.0f, 300.0f, 400.0f,
        -1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
        // clang-format on
    };
    ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
    InputTensors inputs = { { 0, input } };
    std::vector<float> outputData(4 * 6);
    Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
    OutputTensors outputs = { { 0, output } };
    runtime->EnqueueWorkload(netId, inputs, outputs);

    // Check the output is as expected.
    // Note this output has been generated by running the network *without* the optimization.
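    // With DepthToSpace (block size 2, NHWC) the 4 channels of each input pixel become a 2x2 spatial
    // block: the first input pixel (1, 2, 3, 4) yields 1, 2 on the first output row and 3, 4 on the second.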
    std::vector<float> expectedOutput = {
        // Rows and columns here match exactly with the tensor, as there is only 1 channel.
        // clang-format off
        1.0f, 2.0f, 10.0f, 20.0f, 100.0f, 200.0f,
        3.0f, 4.0f, 30.0f, 40.0f, 300.0f, 400.0f,

        -1.0f, -2.0f, -10.0f, -20.0f, -100.0f, -200.0f,
        -3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
        // clang-format on
    };
    BOOST_TEST(outputData == expectedOutput);
}

/// Tests that the optimization performed by TransposeAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
{
    INetworkPtr network = CreateTransposeTestNetwork();

    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    // Confirm that the optimization has actually taken place
    const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
    BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                             &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));

    // Load the graph into a runtime so we can check it produces the correct output
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optimizedNetwork));

    std::vector<float> inputData{
        // Each row here is a row of pixels where each pixel has 4 channels
        // clang-format off
        1.0f, 2.0f, 3.0f, 4.0f, 10.0f, 20.0f, 30.0f, 40.0f, 100.0f, 200.0f, 300.0f, 400.0f,
        -1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
        // clang-format on
    };
    ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
    InputTensors inputs = { { 0, input } };
    std::vector<float> outputData(4 * 6);
    Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
    OutputTensors outputs = { { 0, output } };
    runtime->EnqueueWorkload(netId, inputs, outputs);

    // Check the output is as expected.
    // Note this output has been generated by running the network *without* the optimization.
    std::vector<float> expectedOutput = {
        // Rows and columns here match exactly with the tensor, as there is only 1 channel.
        // clang-format off
        1.0f, 2.0f, 10.0f, 20.0f, 100.0f, 200.0f,
        3.0f, 4.0f, 30.0f, 40.0f, 300.0f, 400.0f,

        -1.0f, -2.0f, -10.0f, -20.0f, -100.0f, -200.0f,
        -3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
        // clang-format on
    };
    BOOST_TEST(outputData == expectedOutput);
}
#endif

BOOST_AUTO_TEST_SUITE_END()