ArmNN 20.08 — Unsupported.cpp (TfLite parser "unsupported custom operator" unit tests).
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "../TfLiteParser.hpp"
8 
10 #include <armnn/utility/Assert.hpp>
12 
13 #include <layers/StandInLayer.hpp>
14 
15 #include <boost/test/unit_test.hpp>
16 
17 #include <sstream>
18 #include <string>
19 #include <vector>
20 
21 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
22 
23 using namespace armnn;
24 
25 class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
26 {
27 public:
28  StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
29  const std::vector<TensorInfo>& outputInfos)
31  , m_InputInfos(inputInfos)
32  , m_OutputInfos(outputInfos) {}
33 
34  void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
35 
36  void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
37 
38  void VisitStandInLayer(const IConnectableLayer* layer,
39  const StandInDescriptor& descriptor,
40  const char*) override
41  {
42  unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
43  BOOST_CHECK(descriptor.m_NumInputs == numInputs);
44  BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
45 
46  unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
47  BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
48  BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
49 
50  const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
51  for (unsigned int i = 0u; i < numInputs; ++i)
52  {
53  const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
54  BOOST_CHECK(connectedSlot != nullptr);
55 
56  const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
57  BOOST_CHECK(inputInfo == m_InputInfos[i]);
58  }
59 
60  for (unsigned int i = 0u; i < numOutputs; ++i)
61  {
62  const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
63  BOOST_CHECK(outputInfo == m_OutputInfos[i]);
64  }
65  }
66 
67 private:
68  std::vector<TensorInfo> m_InputInfos;
69  std::vector<TensorInfo> m_OutputInfos;
70 };
71 
72 class DummyCustomFixture : public ParserFlatbuffersFixture
73 {
74 public:
75  explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
76  const std::vector<TensorInfo>& outputInfos)
78  , m_StandInLayerVerifier(inputInfos, outputInfos)
79  {
80  const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
81  ARMNN_ASSERT(numInputs > 0);
82 
83  const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
84  ARMNN_ASSERT(numOutputs > 0);
85 
86  m_JsonString = R"(
87  {
88  "version": 3,
89  "operator_codes": [{
90  "builtin_code": "CUSTOM",
91  "custom_code": "DummyCustomOperator"
92  }],
93  "subgraphs": [ {
94  "tensors": [)";
95 
96  // Add input tensors
97  for (unsigned int i = 0u; i < numInputs; ++i)
98  {
99  const TensorInfo& inputInfo = inputInfos[i];
100  m_JsonString += R"(
101  {
102  "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
103  "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
104  "buffer": 0,
105  "name": "inputTensor)" + std::to_string(i) + R"(",
106  "quantization": {
107  "min": [ 0.0 ],
108  "max": [ 255.0 ],
109  "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
110  "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
111  }
112  },)";
113  }
114 
115  // Add output tensors
116  for (unsigned int i = 0u; i < numOutputs; ++i)
117  {
118  const TensorInfo& outputInfo = outputInfos[i];
119  m_JsonString += R"(
120  {
121  "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
122  "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
123  "buffer": 0,
124  "name": "outputTensor)" + std::to_string(i) + R"(",
125  "quantization": {
126  "min": [ 0.0 ],
127  "max": [ 255.0 ],
128  "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
129  "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
130  }
131  })";
132 
133  if (i + 1 < numOutputs)
134  {
135  m_JsonString += ",";
136  }
137  }
138 
139  const std::string inputIndices = GetIndicesAsString(0u, numInputs - 1u);
140  const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
141 
142  // Add dummy custom operator
143  m_JsonString += R"(],
144  "inputs": )" + inputIndices + R"(,
145  "outputs": )" + outputIndices + R"(,
146  "operators": [
147  {
148  "opcode_index": 0,
149  "inputs": )" + inputIndices + R"(,
150  "outputs": )" + outputIndices + R"(,
151  "builtin_options_type": 0,
152  "custom_options": [ ],
153  "custom_options_format": "FLEXBUFFERS"
154  }
155  ],
156  } ],
157  "buffers" : [
158  { },
159  { }
160  ]
161  }
162  )";
163 
164  ReadStringToBinary();
165  }
166 
167  void RunTest()
168  {
169  INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
170  network->Accept(m_StandInLayerVerifier);
171  }
172 
173 private:
174  static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
175  {
176  std::stringstream stream;
177  stream << "[ ";
178  for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
179  {
180  stream << tensorShape[i];
181  if (i + 1 < tensorShape.GetNumDimensions())
182  {
183  stream << ",";
184  }
185  stream << " ";
186  }
187  stream << "]";
188 
189  return stream.str();
190  }
191 
192  static std::string GetDataTypeAsString(DataType dataType)
193  {
194  switch (dataType)
195  {
196  case DataType::Float32: return "FLOAT32";
197  case DataType::QAsymmU8: return "UINT8";
198  default: return "UNKNOWN";
199  }
200  }
201 
202  static std::string GetIndicesAsString(unsigned int first, unsigned int last)
203  {
204  std::stringstream stream;
205  stream << "[ ";
206  for (unsigned int i = first; i <= last ; ++i)
207  {
208  stream << i;
209  if (i + 1 <= last)
210  {
211  stream << ",";
212  }
213  stream << " ";
214  }
215  stream << "]";
216 
217  return stream.str();
218  }
219 
220  StandInLayerVerifier m_StandInLayerVerifier;
221 };
222 
223 class DummyCustom1Input1OutputFixture : public DummyCustomFixture
224 {
225 public:
226  DummyCustom1Input1OutputFixture()
227  : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
228  { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
229 };
230 
231 class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
232 {
233 public:
234  DummyCustom2Inputs1OutputFixture()
235  : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
236  { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
237 };
238 
// Parsing a model with an unsupported custom operator (1 input, 1 output)
// should produce a StandInLayer matching the fixture's tensor infos.
BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
{
    RunTest();
}
243 
// Same check for an unsupported custom operator with 2 inputs and 1 output.
BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
{
    RunTest();
}
248 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
int RunTest(const std::string &format, const std::string &inputTensorShapesStr, const vector< armnn::BackendId > &computeDevices, const std::string &dynamicBackendsPath, const std::string &path, const std::string &inputNames, const std::string &inputTensorDataFilePaths, const std::string &inputTypes, bool quantizeInput, const std::string &outputTypes, const std::string &outputNames, const std::string &outputTensorFiles, bool dequantizeOuput, bool enableProfiling, bool enableFp16TurboMode, bool enableBf16TurboMode, const double &thresholdTime, bool printIntermediate, const size_t subgraphId, bool enableLayerDetails=false, bool parseUnsupported=false, bool inferOutputShape=false, const size_t iterations=1, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
This layer represents an unknown operation in the input graph.
Copyright (c) 2020 ARM Limited.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:194
uint32_t m_NumOutputs
Number of output tensors.
DataType
Definition: Types.hpp:32
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:470
float GetQuantizationScale() const
Definition: Tensor.cpp:453
DataType GetDataType() const
Definition: Tensor.hpp:194
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
A StandInDescriptor for the StandIn layer.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
Visitor base class with empty implementations.
BOOST_AUTO_TEST_SUITE_END()
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:175
uint32_t m_NumInputs
Number of input tensors.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63