From ae050524109f1ce827962665436ef7430f2ac479 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Wed, 22 Mar 2023 16:48:58 +0000
Subject: IVGCVSW-7255 Update Doxygen Documentation and publish on GitHub.

 * Updating Doxygen documentation for 23.02 release.

Signed-off-by: David Monahan
Change-Id: I545574ff7664b4595d2fe6a91a3c35d2ad55df82
---
 latest/_parser_prototxt_fixture_8hpp_source.xhtml | 451 ++++++++++++++++++++++
 1 file changed, 451 insertions(+)
 create mode 100644 latest/_parser_prototxt_fixture_8hpp_source.xhtml

diff --git a/latest/_parser_prototxt_fixture_8hpp_source.xhtml b/latest/_parser_prototxt_fixture_8hpp_source.xhtml
new file mode 100644
index 0000000000..a45d4740db
--- /dev/null
+++ b/latest/_parser_prototxt_fixture_8hpp_source.xhtml
@@ -0,0 +1,451 @@

ArmNN 23.02: src/armnnUtils/ParserPrototxtFixture.hpp Source File
ParserPrototxtFixture.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/IRuntime.hpp>
#include <armnnTestUtils/TensorHelpers.hpp>

#include <Network.hpp>
#include <VerificationHelpers.hpp>

#include <doctest/doctest.h>
#include <fmt/format.h>

#include <iomanip>
#include <sstream>
#include <string>

namespace armnnUtils
{

template<typename TParser>
struct ParserPrototxtFixture
{
    ParserPrototxtFixture()
        : m_Parser(TParser::Create())
        , m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
        , m_NetworkIdentifier(0)
    {
    }

    /// Parses and loads the network defined by the m_Prototext string.
    /// @{
    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName);
    void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
                                      const std::string& inputName,
                                      const std::string& outputName);
    void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
                                      const armnn::TensorShape& outputTensorShape,
                                      const std::string& inputName,
                                      const std::string& outputName);
    void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
               const std::vector<std::string>& requestedOutputs);
    void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes);
    void Setup();
    armnn::IOptimizedNetworkPtr SetupOptimizedNetwork(
        const std::map<std::string,armnn::TensorShape>& inputShapes,
        const std::vector<std::string>& requestedOutputs);
    /// @}

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// This overload assumes that the network has a single input and a single output.
    template <std::size_t NumOutputDimensions>
    void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// Calls RunTest with an output type of uint8_t for checking comparison operators.
    template <std::size_t NumOutputDimensions>
    void RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
                           const std::map<std::string, std::vector<uint8_t>>& expectedOutputData);

    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name.
    template <std::size_t NumOutputDimensions, typename T = float>
    void RunTest(const std::map<std::string, std::vector<float>>& inputData,
                 const std::map<std::string, std::vector<T>>& expectedOutputData);

    std::string m_Prototext;
    std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetworkIdentifier;

    /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
    /// @{
    std::string m_SingleInputName;
    std::string m_SingleOutputName;
    /// @}

    /// This will store the output shape so it doesn't need to be passed to the single-input-single-output overload
    /// of RunTest().
    armnn::TensorShape m_SingleOutputShape;
};

template<typename TParser>
void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const std::string& inputName,
                                                                  const std::string& outputName)
{
    // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
    m_SingleInputName = inputName;
    m_SingleOutputName = outputName;
    Setup({ }, { outputName });
}

template<typename TParser>
void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
                                                                  const std::string& inputName,
                                                                  const std::string& outputName)
{
    // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
    m_SingleInputName = inputName;
    m_SingleOutputName = outputName;
    Setup({ { inputName, inputTensorShape } }, { outputName });
}

template<typename TParser>
void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
                                                                  const armnn::TensorShape& outputTensorShape,
                                                                  const std::string& inputName,
                                                                  const std::string& outputName)
{
    // Stores the input name, the output name and the output tensor shape
    // so they don't need to be passed to the single-input-single-output RunTest().
    m_SingleInputName = inputName;
    m_SingleOutputName = outputName;
    m_SingleOutputShape = outputTensorShape;
    Setup({ { inputName, inputTensorShape } }, { outputName });
}

template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
                                           const std::vector<std::string>& requestedOutputs)
{
    std::string errorMessage;

    armnn::INetworkPtr network =
        m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
    auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
    if (ret != armnn::Status::Success)
    {
        throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
                                           errorMessage,
                                           CHECK_LOCATION().AsString()));
    }
}

template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes)
{
    std::string errorMessage;

    armnn::INetworkPtr network =
        m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes);
    auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
    if (ret != armnn::Status::Success)
    {
        throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
                                           errorMessage,
                                           CHECK_LOCATION().AsString()));
    }
}

template<typename TParser>
void ParserPrototxtFixture<TParser>::Setup()
{
    std::string errorMessage;

    armnn::INetworkPtr network =
        m_Parser->CreateNetworkFromString(m_Prototext.c_str());
    auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
    armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
    if (ret != armnn::Status::Success)
    {
        throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
                                           errorMessage,
                                           CHECK_LOCATION().AsString()));
    }
}

template<typename TParser>
armnn::IOptimizedNetworkPtr ParserPrototxtFixture<TParser>::SetupOptimizedNetwork(
    const std::map<std::string,armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    armnn::INetworkPtr network =
        m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
    auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
    return optimized;
}

template<typename TParser>
template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
                                             const std::vector<float>& expectedOutputData)
{
    RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } });
}

template<typename TParser>
template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
                                                       const std::map<std::string, std::vector<uint8_t>>&
                                                       expectedOutputData)
{
    RunTest<NumOutputDimensions, uint8_t>(inputData, expectedOutputData);
}

template<typename TParser>
template <std::size_t NumOutputDimensions, typename T>
void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
                                             const std::map<std::string, std::vector<T>>& expectedOutputData)
{
    // Sets up the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    for (auto&& it : inputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
        bindingInfo.second.SetConstant(true);
        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
        if (bindingInfo.second.GetNumElements() != it.second.size())
        {
            throw armnn::Exception(fmt::format("Input tensor {0} is expected to have {1} elements. "
                                               "{2} elements supplied. {3}",
                                               it.first,
                                               bindingInfo.second.GetNumElements(),
                                               it.second.size(),
                                               CHECK_LOCATION().AsString()));
        }
    }

    // Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
    std::map<std::string, std::vector<T>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
        outputStorage.emplace(it.first, std::vector<T>(bindingInfo.second.GetNumElements()));
        outputTensors.push_back(
            { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Compares each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
        if (bindingInfo.second.GetNumElements() != it.second.size())
        {
            throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} elements. "
                                               "{2} elements supplied. {3}",
                                               it.first,
                                               bindingInfo.second.GetNumElements(),
                                               it.second.size(),
                                               CHECK_LOCATION().AsString()));
        }

        // If the expected output shape is set, the output tensor shape is checked against it.
        if (m_SingleOutputShape.GetNumDimensions() != 0)
        {
            if (bindingInfo.second.GetShape().GetNumDimensions() == NumOutputDimensions &&
                bindingInfo.second.GetShape().GetNumDimensions() == m_SingleOutputShape.GetNumDimensions())
            {
                for (unsigned int i = 0; i < m_SingleOutputShape.GetNumDimensions(); ++i)
                {
                    if (m_SingleOutputShape[i] != bindingInfo.second.GetShape()[i])
                    {
                        // This exception message could not be created by fmt::format because of an oddity in
                        // the operator<< of TensorShape.
                        std::stringstream message;
                        message << "Output tensor " << it.first << " is expected to have "
                                << bindingInfo.second.GetShape() << " shape. "
                                << m_SingleOutputShape << " shape supplied. "
                                << CHECK_LOCATION().AsString();
                        throw armnn::Exception(message.str());
                    }
                }
            }
            else
            {
                throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} dimensions. "
                                                   "{2} dimensions supplied. {3}",
                                                   it.first,
                                                   bindingInfo.second.GetShape().GetNumDimensions(),
                                                   NumOutputDimensions,
                                                   CHECK_LOCATION().AsString()));
            }
        }

        auto outputExpected = it.second;
        auto shape = bindingInfo.second.GetShape();
        if (std::is_same<T, uint8_t>::value)
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape, true);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
        else
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}

} // namespace armnnUtils
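
For orientation, a minimal sketch of how a parser test suite typically drives this fixture follows. It is not part of ParserPrototxtFixture.hpp: the parser type (armnnOnnxParser::IOnnxParser), the model text, the tensor names, and the include paths are illustrative assumptions.

// Hypothetical usage sketch -- not part of the header above. Assumes the
// ONNX parser (armnnOnnxParser) is built and linked; the protobuf text is a
// hand-written minimal ReLU model, not taken from this file.
#include <armnnOnnxParser/IOnnxParser.hpp>
#include "ParserPrototxtFixture.hpp"

#include <doctest/doctest.h>

struct ReluFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    ReluFixture()
    {
        // Protobuf text format for a one-node ReLU graph (illustrative).
        m_Prototext = R"(
            ir_version: 3
            graph {
              name: "ReluGraph"
              node { input: "Input" output: "Output" op_type: "Relu" }
              input {
                name: "Input"
                type { tensor_type { elem_type: 1 shape { dim { dim_value: 4 } } } }
              }
              output {
                name: "Output"
                type { tensor_type { elem_type: 1 shape { dim { dim_value: 4 } } } }
              }
            }
            opset_import { version: 7 })";
        Setup(); // parses m_Prototext, optimizes for CpuRef and loads the network
    }
};

TEST_SUITE("OnnxParser_Relu")
{
TEST_CASE_FIXTURE(ReluFixture, "ValidReluTest")
{
    // One output dimension; inputs and outputs are identified by name.
    RunTest<1>({ { "Input",  { -1.0f, 0.0f, 2.0f, -3.0f } } },
               { { "Output", {  0.0f, 0.0f, 2.0f,  0.0f } } });
}
}

Because ParserPrototxtFixture is a class template, only the members a test actually uses are instantiated, so a parser type only needs to provide the CreateNetworkFromString overloads required by the Setup variant it calls.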