ArmNN
 21.08
Unsupported.cpp File Reference

Go to the source code of this file.

Functions

 TEST_SUITE ("TensorflowLiteParser_Unsupported")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "TensorflowLiteParser_Unsupported"  )

Definition at line 18 of file Unsupported.cpp.

References ARMNN_ASSERT, armnn::Float32, TensorInfo::GetDataType(), TensorShape::GetNumDimensions(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), OutputSlot::GetTensorInfo(), StandInDescriptor::m_NumInputs, StandInDescriptor::m_NumOutputs, armnn::numeric_cast(), armnn::QAsymmU8, and TEST_CASE_FIXTURE().

19 {
20 using namespace armnn;
21 
22 class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
23 {
24 public:
25  StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
26  const std::vector<TensorInfo>& outputInfos)
27  : LayerVisitorBase<VisitorThrowingPolicy>()
28  , m_InputInfos(inputInfos)
29  , m_OutputInfos(outputInfos) {}
30 
31  void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
32 
33  void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
34 
35  void VisitStandInLayer(const IConnectableLayer* layer,
36  const StandInDescriptor& descriptor,
37  const char*) override
38  {
39  unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
40  CHECK(descriptor.m_NumInputs == numInputs);
41  CHECK(layer->GetNumInputSlots() == numInputs);
42 
43  unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
44  CHECK(descriptor.m_NumOutputs == numOutputs);
45  CHECK(layer->GetNumOutputSlots() == numOutputs);
46 
47  const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
48  for (unsigned int i = 0u; i < numInputs; ++i)
49  {
50  const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
51  CHECK(connectedSlot != nullptr);
52 
53  const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
54  CHECK(inputInfo == m_InputInfos[i]);
55  }
56 
57  for (unsigned int i = 0u; i < numOutputs; ++i)
58  {
59  const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
60  CHECK(outputInfo == m_OutputInfos[i]);
61  }
62  }
63 
64 private:
65  std::vector<TensorInfo> m_InputInfos;
66  std::vector<TensorInfo> m_OutputInfos;
67 };
68 
69 class DummyCustomFixture : public ParserFlatbuffersFixture
70 {
71 public:
72  explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
73  const std::vector<TensorInfo>& outputInfos)
74  : ParserFlatbuffersFixture()
75  , m_StandInLayerVerifier(inputInfos, outputInfos)
76  {
77  const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
78  ARMNN_ASSERT(numInputs > 0);
79 
80  const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
81  ARMNN_ASSERT(numOutputs > 0);
82 
83  m_JsonString = R"(
84  {
85  "version": 3,
86  "operator_codes": [{
87  "builtin_code": "CUSTOM",
88  "custom_code": "DummyCustomOperator"
89  }],
90  "subgraphs": [ {
91  "tensors": [)";
92 
93  // Add input tensors
94  for (unsigned int i = 0u; i < numInputs; ++i)
95  {
96  const TensorInfo& inputInfo = inputInfos[i];
97  m_JsonString += R"(
98  {
99  "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
100  "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
101  "buffer": 0,
102  "name": "inputTensor)" + std::to_string(i) + R"(",
103  "quantization": {
104  "min": [ 0.0 ],
105  "max": [ 255.0 ],
106  "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
107  "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
108  }
109  },)";
110  }
111 
112  // Add output tensors
113  for (unsigned int i = 0u; i < numOutputs; ++i)
114  {
115  const TensorInfo& outputInfo = outputInfos[i];
116  m_JsonString += R"(
117  {
118  "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
119  "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
120  "buffer": 0,
121  "name": "outputTensor)" + std::to_string(i) + R"(",
122  "quantization": {
123  "min": [ 0.0 ],
124  "max": [ 255.0 ],
125  "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
126  "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
127  }
128  })";
129 
130  if (i + 1 < numOutputs)
131  {
132  m_JsonString += ",";
133  }
134  }
135 
136  const std::string inputIndices = GetIndicesAsString(0u, numInputs - 1u);
137  const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
138 
139  // Add dummy custom operator
140  m_JsonString += R"(],
141  "inputs": )" + inputIndices + R"(,
142  "outputs": )" + outputIndices + R"(,
143  "operators": [
144  {
145  "opcode_index": 0,
146  "inputs": )" + inputIndices + R"(,
147  "outputs": )" + outputIndices + R"(,
148  "builtin_options_type": 0,
149  "custom_options": [ ],
150  "custom_options_format": "FLEXBUFFERS"
151  }
152  ],
153  } ],
154  "buffers" : [
155  { },
156  { }
157  ]
158  }
159  )";
160 
161  ReadStringToBinary();
162  }
163 
164  void RunTest()
165  {
166  INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
167  network->Accept(m_StandInLayerVerifier);
168  }
169 
170 private:
171  static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
172  {
173  std::stringstream stream;
174  stream << "[ ";
175  for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
176  {
177  stream << tensorShape[i];
178  if (i + 1 < tensorShape.GetNumDimensions())
179  {
180  stream << ",";
181  }
182  stream << " ";
183  }
184  stream << "]";
185 
186  return stream.str();
187  }
188 
189  static std::string GetDataTypeAsString(DataType dataType)
190  {
191  switch (dataType)
192  {
193  case DataType::Float32: return "FLOAT32";
194  case DataType::QAsymmU8: return "UINT8";
195  default: return "UNKNOWN";
196  }
197  }
198 
199  static std::string GetIndicesAsString(unsigned int first, unsigned int last)
200  {
201  std::stringstream stream;
202  stream << "[ ";
203  for (unsigned int i = first; i <= last ; ++i)
204  {
205  stream << i;
206  if (i + 1 <= last)
207  {
208  stream << ",";
209  }
210  stream << " ";
211  }
212  stream << "]";
213 
214  return stream.str();
215  }
216 
217  StandInLayerVerifier m_StandInLayerVerifier;
218 };
219 
220 class DummyCustom1Input1OutputFixture : public DummyCustomFixture
221 {
222 public:
223  DummyCustom1Input1OutputFixture()
224  : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
225  { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
226 };
227 
228 class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
229 {
230 public:
231  DummyCustom2Inputs1OutputFixture()
232  : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
233  { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
234 };
235 
236 TEST_CASE_FIXTURE(DummyCustom1Input1OutputFixture, "UnsupportedCustomOperator1Input1Output")
237 {
238  RunTest();
239 }
240 
241 TEST_CASE_FIXTURE(DummyCustom2Inputs1OutputFixture, "UnsupportedCustomOperator2Inputs1Output")
242 {
243  RunTest();
244 }
245 
246 }
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
This layer represents an unknown operation in the input graph.
Copyright (c) 2021 ARM Limited and Contributors.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:244
uint32_t m_NumOutputs
Number of output tensors.
TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpu")
DataType
Definition: Types.hpp:35
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
DataType GetDataType() const
Definition: Tensor.hpp:198
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
A StandInDescriptor for the StandIn layer.
Visitor base class with empty implementations.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
uint32_t m_NumInputs
Number of input tensors.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:172
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63