ArmNN
 22.02
ReduceMultipleAxesTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <GraphUtils.hpp>
7 #include <TestUtils.hpp>
8 
9 #include <armnn/INetwork.hpp>
10 
11 #include <doctest/doctest.h>
12 
13 using namespace armnn;
14 
15 namespace
16 {
17 #if defined(ARMCOMPUTENEON_ENABLED)||defined(ARMCOMPUTECL_ENABLED)
18 INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
19  TensorShape& inputShape,
20  TensorShape& outputShape)
21 {
22  // Create a network
23  INetworkPtr network = INetwork::Create();
24 
25  const std::string layerName("reduce_layer");
26  const TensorInfo inputInfo(inputShape, DataType::Float32);
27  const TensorInfo outputInfo(outputShape, DataType::Float32);
28 
29  IConnectableLayer* const inputLayer = network->AddInputLayer(0);
30  IConnectableLayer* const reduceLayer = network->AddReduceLayer(reduceDescriptor, layerName.c_str());
31  IConnectableLayer* const outputLayer1 = network->AddOutputLayer(0);
32  IConnectableLayer* const outputLayer2 = network->AddOutputLayer(1);
33 
34  inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
35  reduceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
36 
37  inputLayer->GetOutputSlot(0).Connect(reduceLayer->GetInputSlot(0));
38  reduceLayer->GetOutputSlot(0).Connect(outputLayer1->GetInputSlot(0));
39  reduceLayer->GetOutputSlot(0).Connect(outputLayer2->GetInputSlot(0));
40 
41  return network;
42 }
43 
44 void ReduceWithMultipleAxesTest(INetworkPtr& network,
45  const TensorShape& outputShape,
46  const std::vector<float>& inputData,
47  const std::vector<float>& expectedOutput,
48  const size_t numOfAxes,
49  Compute backendId)
50 {
51  // Create ArmNN runtime
53 
54  // Optimise ArmNN network
55  IOptimizedNetworkPtr optNet = Optimize(*network, {backendId}, run->GetDeviceSpec());
56 
57  Graph& graph = GetGraphForTesting(optNet.get());
58  if (numOfAxes == 2)
59  {
60  CHECK(graph.GetNumLayers() == 5);
61  CHECK(CheckSequence(graph.cbegin(),
62  graph.cend(),
63  &IsLayerOfType<InputLayer>,
64  &IsLayerOfType<ReduceLayer>,
65  &IsLayerOfType<ReduceLayer>,
66  &IsLayerOfType<OutputLayer>,
67  &IsLayerOfType<OutputLayer>));
68  } else
69  {
70  CHECK(graph.GetNumLayers() == 6);
71  CHECK(CheckSequence(graph.cbegin(),
72  graph.cend(),
73  &IsLayerOfType<InputLayer>,
74  &IsLayerOfType<ReduceLayer>,
75  &IsLayerOfType<ReduceLayer>,
76  &IsLayerOfType<ReduceLayer>,
77  &IsLayerOfType<OutputLayer>,
78  &IsLayerOfType<OutputLayer>));
79  }
80 
81  // Get last layer in new chain, layers name follow 0, 1, 2 pattern
82  std::string layerName = "reduce_layer_" + std::to_string(numOfAxes - 1);
83  Layer* const reduceLayer = GetFirstLayerWithName(graph, layerName);
84  CHECK(reduceLayer);
85  auto reduceTensorInfo = reduceLayer->GetOutputSlot().GetTensorInfo();
86 
87  // Tensorshape and the data type are correct
88  CHECK((reduceTensorInfo.GetShape() == outputShape));
89  CHECK((reduceTensorInfo.GetDataType() == DataType::Float32));
90 
91  // Load network into runtime
92  NetworkId networkIdentifier;
93  run->LoadNetwork(networkIdentifier, std::move(optNet));
94 
95  // Create input and output tensors
96  std::vector<float> outputData(expectedOutput.size());
97  armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
98  inputTensorInfo.SetConstant(true);
99  InputTensors inputTensors
100  {
101  {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
102  };
103  OutputTensors outputTensors
104  {
105  {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())},
106  {1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data())}
107  };
108 
109  // Run inference
110  run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
111 
112  // Checks the results
113  CHECK(outputData == expectedOutput);
114 }
115 
116 void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
117 {
118  armnn::ReduceDescriptor reduceDescriptor;
119  reduceDescriptor.m_vAxis = {1, 2};
120  reduceDescriptor.m_KeepDims = true;
122 
123  TensorShape inputShape = {1, 3, 2, 4};
124  TensorShape outputShape = {1, 1, 1, 4};
125 
126  // Construct ArmNN network
127  INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
128 
129  // Creates structures for input & output.
130  const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
131  5.0f, 6.0f, 7.0f, 8.0f,
132 
133  10.0f, 20.0f, 30.0f, 40.0f,
134  50.0f, 60.0f, 70.0f, 80.0f,
135 
136  100.0f, 200.0f, 300.0f, 400.0f,
137  500.0f, 600.0f, 700.0f, 800.0f});
138  const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
139 
140  ReduceWithMultipleAxesTest(network,
141  outputShape,
142  inputData,
143  expectedOutput,
144  reduceDescriptor.m_vAxis.size(),
145  backendId);
146 }
147 
148 void ReduceSumWithTwoAxesTest(Compute backendId)
149 {
150  armnn::ReduceDescriptor reduceDescriptor;
151  reduceDescriptor.m_vAxis = {1, 2};
152  reduceDescriptor.m_KeepDims = false;
154 
155  TensorShape inputShape = {1, 3, 2, 4};
156  TensorShape outputShape = {1, 4};
157 
158  // Construct ArmNN network
159  INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
160 
161  // Creates structures for input & output.
162  const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
163  5.0f, 6.0f, 7.0f, 8.0f,
164 
165  10.0f, 20.0f, 30.0f, 40.0f,
166  50.0f, 60.0f, 70.0f, 80.0f,
167 
168  100.0f, 200.0f, 300.0f, 400.0f,
169  500.0f, 600.0f, 700.0f, 800.0f});
170  const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
171 
172  ReduceWithMultipleAxesTest(network,
173  outputShape,
174  inputData,
175  expectedOutput,
176  reduceDescriptor.m_vAxis.size(),
177  backendId);
178 }
179 
180 void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
181 {
182  armnn::ReduceDescriptor reduceDescriptor;
183  reduceDescriptor.m_vAxis = {0, 2, 3};
184  reduceDescriptor.m_KeepDims = true;
186 
187  TensorShape inputShape = {2, 2, 2, 2};
188  TensorShape outputShape = {1, 2, 1, 1};
189 
190  // Construct ArmNN network
191  INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
192 
193  // Creates structures for input & output.
194  const std::vector<float> inputData({1.0f, 2.0f,
195  3.0f, 4.0f,
196 
197  5.0f, 6.0f,
198  7.0f, 8.0f,
199 
200  10.0f, 20.0f,
201  30.0f, 40.0f,
202 
203  50.0f, 60.0f,
204  70.0f, 80.0f});
205  const std::vector<float> expectedOutput({110.0f, 286.0f});
206 
207  ReduceWithMultipleAxesTest(network,
208  outputShape,
209  inputData,
210  expectedOutput,
211  reduceDescriptor.m_vAxis.size(),
212  backendId);
213 }
214 
215 void ReduceSumWithThreeAxesTest(Compute backendId)
216 {
217  armnn::ReduceDescriptor reduceDescriptor;
218  reduceDescriptor.m_vAxis = {0, 2, 3};
219  reduceDescriptor.m_KeepDims = false;
221 
222  TensorShape inputShape = {2, 2, 2, 2};
223  TensorShape outputShape = {2};
224 
225  // Construct ArmNN network
226  INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
227 
228  // Creates structures for input & output.
229  const std::vector<float> inputData({1.0f, 2.0f,
230  3.0f, 4.0f,
231 
232  5.0f, 6.0f,
233  7.0f, 8.0f,
234 
235  10.0f, 20.0f,
236  30.0f, 40.0f,
237 
238  50.0f, 60.0f,
239  70.0f, 80.0f});
240  const std::vector<float> expectedOutput({110.0f, 286.0f});
241 
242  ReduceWithMultipleAxesTest(network,
243  outputShape,
244  inputData,
245  expectedOutput,
246  reduceDescriptor.m_vAxis.size(),
247  backendId);
248 }
249 #endif
250 }
251 
252 #if defined(ARMCOMPUTENEON_ENABLED)
253 TEST_SUITE("Optimizer_ReduceMultipleAxesCpu")
254 {
255 TEST_CASE("ReduceSumWithTwoAxesKeepDimsCpuAccTest")
256 {
257  ReduceSumWithTwoAxesKeepDimsTest(Compute::CpuAcc);
258 }
259 
260 TEST_CASE("ReduceSumWithTwoAxesCpuAccTest")
261 {
262  ReduceSumWithTwoAxesTest(Compute::CpuAcc);
263 }
264 
265 TEST_CASE("ReduceSumWithThreeAxesKeepDimsCpuAccTest")
266 {
267  ReduceSumWithThreeAxesKeepDimsTest(Compute::CpuAcc);
268 }
269 
270 TEST_CASE("ReduceSumWithThreeAxesCpuAccTest")
271 {
272  ReduceSumWithThreeAxesTest(Compute::CpuAcc);
273 }
274 }
275 #endif
276 
277 #if defined(ARMCOMPUTECL_ENABLED)
278 TEST_SUITE("Optimizer_ReduceMultipleAxesGpu")
279 {
280 TEST_CASE("ReduceSumWithTwoAxesKeepDimsGpuAccTest")
281 {
282  ReduceSumWithTwoAxesKeepDimsTest(Compute::GpuAcc);
283 }
284 
285 TEST_CASE("ReduceSumWithTwoAxesGpuAccTest")
286 {
287  ReduceSumWithTwoAxesTest(Compute::GpuAcc);
288 }
289 
290 TEST_CASE("ReduceSumWithThreeAxesKeepDimsGpuAccTest")
291 {
292  ReduceSumWithThreeAxesKeepDimsTest(Compute::GpuAcc);
293 }
294 
295 TEST_CASE("ReduceSumWithThreeAxesGpuAccTest")
296 {
297  ReduceSumWithThreeAxesTest(Compute::GpuAcc);
298 }
299 }
300 #endif
Doxygen cross-reference index (scrape residue — not part of the source file):
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:40
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
armnn::Layer * GetFirstLayerWithName(armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:22
bool m_KeepDims
if true then output shape has no change.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:31
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:392
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Copyright (c) 2021 ARM Limited and Contributors.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:319
Compute
The Compute enum is now deprecated and it is now being replaced by BackendId.
Definition: BackendId.hpp:21
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1680
A ReduceDescriptor for the REDUCE operators.
int NetworkId
Definition: IRuntime.hpp:25
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:393
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:242
GPU Execution: OpenCL: ArmCompute.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:47
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
void SetConstant(const bool IsConstant=true)
Marks the data corresponding to this tensor info as constant.
Definition: Tensor.cpp:516
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:323
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
virtual int Connect(IInputSlot &destination)=0
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:66
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:492