ArmNN
 20.05
ParserPrototxtFixture.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/IRuntime.hpp>
9 
10 #include <test/TensorHelpers.hpp>
11 
12 #include <Network.hpp>
13 #include <VerificationHelpers.hpp>
14 
15 #include <boost/format.hpp>
16 
#include <cstdint>
#include <iomanip>
#include <map>
#include <string>
#include <utility>
#include <vector>
19 
20 namespace armnnUtils
21 {
22 
23 template<typename TParser>
25 {
27  : m_Parser(TParser::Create())
28  , m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
30  {
31  }
32 
33  /// Parses and loads the network defined by the m_Prototext string.
34  /// @{
35  void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName);
36  void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
37  const std::string& inputName,
38  const std::string& outputName);
39  void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
40  const armnn::TensorShape& outputTensorShape,
41  const std::string& inputName,
42  const std::string& outputName);
43  void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
44  const std::vector<std::string>& requestedOutputs);
45  void Setup();
47  const std::map<std::string,armnn::TensorShape>& inputShapes,
48  const std::vector<std::string>& requestedOutputs);
49  /// @}
50 
51  /// Executes the network with the given input tensor and checks the result against the given output tensor.
52  /// This overload assumes that the network has a single input and a single output.
53  template <std::size_t NumOutputDimensions>
54  void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);
55 
56  /// Executes the network with the given input tensor and checks the result against the given output tensor.
57  /// Calls RunTest with output type of uint8_t for checking comparison operators.
58  template <std::size_t NumOutputDimensions>
59  void RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
60  const std::map<std::string, std::vector<uint8_t>>& expectedOutputData);
61 
62  /// Executes the network with the given input tensors and checks the results against the given output tensors.
63  /// This overload supports multiple inputs and multiple outputs, identified by name.
64  template <std::size_t NumOutputDimensions, typename T = float>
65  void RunTest(const std::map<std::string, std::vector<float>>& inputData,
66  const std::map<std::string, std::vector<T>>& expectedOutputData);
67 
68  std::string m_Prototext;
69  std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
72 
73  /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
74  /// so they don't need to be passed to the single-input-single-output overload of RunTest().
75  /// @{
76  std::string m_SingleInputName;
77  std::string m_SingleOutputName;
78  /// @}
79 
80  /// This will store the output shape so it don't need to be passed to the single-input-single-output overload
81  /// of RunTest().
83 };
84 
85 template<typename TParser>
87  const std::string& outputName)
88 {
89  // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
90  m_SingleInputName = inputName;
91  m_SingleOutputName = outputName;
92  Setup({ }, { outputName });
93 }
94 
95 template<typename TParser>
97  const std::string& inputName,
98  const std::string& outputName)
99 {
100  // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
101  m_SingleInputName = inputName;
102  m_SingleOutputName = outputName;
103  Setup({ { inputName, inputTensorShape } }, { outputName });
104 }
105 
106 template<typename TParser>
108  const armnn::TensorShape& outputTensorShape,
109  const std::string& inputName,
110  const std::string& outputName)
111 {
112  // Stores the input name, the output name and the output tensor shape
113  // so they don't need to be passed to the single-input-single-output RunTest().
114  m_SingleInputName = inputName;
115  m_SingleOutputName = outputName;
116  m_SingleOutputShape = outputTensorShape;
117  Setup({ { inputName, inputTensorShape } }, { outputName });
118 }
119 
120 template<typename TParser>
121 void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
122  const std::vector<std::string>& requestedOutputs)
123 {
124  std::string errorMessage;
125 
126  armnn::INetworkPtr network =
127  m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
128  auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
129  armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
130  if (ret != armnn::Status::Success)
131  {
132  throw armnn::Exception(boost::str(
133  boost::format("LoadNetwork failed with error: '%1%' %2%")
134  % errorMessage
135  % CHECK_LOCATION().AsString()));
136  }
137 }
138 
139 template<typename TParser>
141 {
142  std::string errorMessage;
143 
144  armnn::INetworkPtr network =
145  m_Parser->CreateNetworkFromString(m_Prototext.c_str());
146  auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
147  armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
148  if (ret != armnn::Status::Success)
149  {
150  throw armnn::Exception(boost::str(
151  boost::format("LoadNetwork failed with error: '%1%' %2%")
152  % errorMessage
153  % CHECK_LOCATION().AsString()));
154  }
155 }
156 
157 template<typename TParser>
159  const std::map<std::string,armnn::TensorShape>& inputShapes,
160  const std::vector<std::string>& requestedOutputs)
161 {
162  armnn::INetworkPtr network =
163  m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
164  auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
165  return optimized;
166 }
167 
168 template<typename TParser>
169 template <std::size_t NumOutputDimensions>
170 void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
171  const std::vector<float>& expectedOutputData)
172 {
173  RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } });
174 }
175 
176 template<typename TParser>
177 template <std::size_t NumOutputDimensions>
178 void ParserPrototxtFixture<TParser>::RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
179  const std::map<std::string, std::vector<uint8_t>>&
180  expectedOutputData)
181 {
182  RunTest<NumOutputDimensions, uint8_t>(inputData, expectedOutputData);
183 }
184 
185 template<typename TParser>
186 template <std::size_t NumOutputDimensions, typename T>
187 void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
188  const std::map<std::string, std::vector<T>>& expectedOutputData)
189 {
190  // Sets up the armnn input tensors from the given vectors.
191  armnn::InputTensors inputTensors;
192  for (auto&& it : inputData)
193  {
194  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
195  inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
196  }
197 
198  // Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
199  std::map<std::string, boost::multi_array<T, NumOutputDimensions>> outputStorage;
200  armnn::OutputTensors outputTensors;
201  for (auto&& it : expectedOutputData)
202  {
203  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
204  outputStorage.emplace(it.first, MakeTensor<T, NumOutputDimensions>(bindingInfo.second));
205  outputTensors.push_back(
206  { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
207  }
208 
209  m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
210 
211  // Compares each output tensor to the expected values.
212  for (auto&& it : expectedOutputData)
213  {
214  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
215  if (bindingInfo.second.GetNumElements() != it.second.size())
216  {
217  throw armnn::Exception(
218  boost::str(boost::format("Output tensor %1% is expected to have %2% elements. "
219  "%3% elements supplied. %4%") %
220  it.first %
221  bindingInfo.second.GetNumElements() %
222  it.second.size() %
223  CHECK_LOCATION().AsString()));
224  }
225 
226  // If the expected output shape is set, the output tensor checks will be carried out.
228  {
229 
230  if (bindingInfo.second.GetShape().GetNumDimensions() == NumOutputDimensions &&
231  bindingInfo.second.GetShape().GetNumDimensions() == m_SingleOutputShape.GetNumDimensions())
232  {
233  for (unsigned int i = 0; i < m_SingleOutputShape.GetNumDimensions(); ++i)
234  {
235  if (m_SingleOutputShape[i] != bindingInfo.second.GetShape()[i])
236  {
237  throw armnn::Exception(
238  boost::str(boost::format("Output tensor %1% is expected to have %2% shape. "
239  "%3% shape supplied. %4%") %
240  it.first %
241  bindingInfo.second.GetShape() %
243  CHECK_LOCATION().AsString()));
244  }
245  }
246  }
247  else
248  {
249  throw armnn::Exception(
250  boost::str(boost::format("Output tensor %1% is expected to have %2% dimensions. "
251  "%3% dimensions supplied. %4%") %
252  it.first %
253  bindingInfo.second.GetShape().GetNumDimensions() %
254  NumOutputDimensions %
255  CHECK_LOCATION().AsString()));
256  }
257  }
258 
259  auto outputExpected = MakeTensor<T, NumOutputDimensions>(bindingInfo.second, it.second);
260  if (std::is_same<T, uint8_t>::value)
261  {
262  BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], true));
263  }
264  else
265  {
266  BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
267  }
268  }
269 }
270 
271 } // namespace armnnUtils
CPU Execution: Reference C++ kernels.
armnn::TensorShape m_SingleOutputShape
This will store the output shape so it doesn't need to be passed to the single-input-single-output over...
boost::test_tools::predicate_result CompareTensors(const boost::multi_array< T, n > &a, const boost::multi_array< T, n > &b, bool compareBoolean=false)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
void RunComparisonTest(const std::map< std::string, std::vector< float >> &inputData, const std::map< std::string, std::vector< uint8_t >> &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tenso...
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:225
int NetworkId
Definition: IRuntime.hpp:20
Copyright (c) 2020 ARM Limited.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:191
std::unique_ptr< TParser, void(*)(TParser *parser)> m_Parser
armnn::IOptimizedNetworkPtr SetupOptimizedNetwork(const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1003
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and outpu...
void RunTest(const std::vector< float > &inputData, const std::vector< float > &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tenso...
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:226
Status
enumeration
Definition: Types.hpp:26
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:573
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
Definition: Tensor.hpp:146
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
Parses and loads the network defined by the m_Prototext string.
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101