ArmNN 22.08
Unsupported.cpp File Reference
#include "ParserFlatbuffersFixture.hpp"
#include <armnn/StrategyBase.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <layers/StandInLayer.hpp>
#include <sstream>
#include <vector>


Functions

 TEST_SUITE ("TensorflowLiteParser_Unsupported")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "TensorflowLiteParser_Unsupported"  )

Definition at line 18 of file Unsupported.cpp.

References ARMNN_ASSERT, armnn::Float32, TensorInfo::GetDataType(), armnn::GetLayerTypeAsCString(), TensorShape::GetNumDimensions(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), OutputSlot::GetTensorInfo(), IConnectableLayer::GetType(), armnn::IgnoreUnused(), armnn::numeric_cast(), armnn::QAsymmU8, armnn::StandIn, and TEST_CASE_FIXTURE().
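
This test suite builds a TensorFlow Lite model whose only operator is an unsupported custom operator ("DummyCustomOperator"), parses it, and verifies that the parser substitutes an armnn::StandInLayer whose StandInDescriptor, input/output slot counts, and tensor infos match the original operator.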

{
using namespace armnn;

// Checks that a parsed network contains a StandIn layer whose descriptor,
// slot counts and tensor infos match the expected inputs and outputs.
class StandInLayerVerifier : public StrategyBase<NoThrowStrategy>
{
public:
    StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
                         const std::vector<TensorInfo>& outputInfos)
        : m_InputInfos(inputInfos)
        , m_OutputInfos(outputInfos) {}

    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(descriptor, constants, id);
        switch (layer->GetType())
        {
            case armnn::LayerType::StandIn:
            {
                auto standInDescriptor = static_cast<const armnn::StandInDescriptor&>(descriptor);

                unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
                CHECK(standInDescriptor.m_NumInputs == numInputs);
                CHECK(layer->GetNumInputSlots() == numInputs);

                unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
                CHECK(standInDescriptor.m_NumOutputs == numOutputs);
                CHECK(layer->GetNumOutputSlots() == numOutputs);

                const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
                for (unsigned int i = 0u; i < numInputs; ++i)
                {
                    const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
                    CHECK(connectedSlot != nullptr);

                    const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
                    CHECK(inputInfo == m_InputInfos[i]);
                }

                for (unsigned int i = 0u; i < numOutputs; ++i)
                {
                    const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
                    CHECK(outputInfo == m_OutputInfos[i]);
                }
                break;
            }
            default:
            {
                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
            }
        }
    }

private:
    std::vector<TensorInfo> m_InputInfos;
    std::vector<TensorInfo> m_OutputInfos;
};

// Builds a TfLite flatbuffer model containing a single custom operator
// ("DummyCustomOperator") that the parser does not support.
class DummyCustomFixture : public ParserFlatbuffersFixture
{
public:
    explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
                                const std::vector<TensorInfo>& outputInfos)
        : ParserFlatbuffersFixture()
        , m_StandInLayerVerifier(inputInfos, outputInfos)
    {
        const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
        ARMNN_ASSERT(numInputs > 0);

        const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
        ARMNN_ASSERT(numOutputs > 0);

        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [{
                    "builtin_code": "CUSTOM",
                    "custom_code": "DummyCustomOperator"
                }],
                "subgraphs": [ {
                    "tensors": [)";

        // Add input tensors
        for (unsigned int i = 0u; i < numInputs; ++i)
        {
            const TensorInfo& inputInfo = inputInfos[i];
            m_JsonString += R"(
                    {
                        "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
                        "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
                        "buffer": 0,
                        "name": "inputTensor)" + std::to_string(i) + R"(",
                        "quantization": {
                            "min": [ 0.0 ],
                            "max": [ 255.0 ],
                            "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
                            "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
                        }
                    },)";
        }

        // Add output tensors
        for (unsigned int i = 0u; i < numOutputs; ++i)
        {
            const TensorInfo& outputInfo = outputInfos[i];
            m_JsonString += R"(
                    {
                        "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
                        "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
                        "buffer": 0,
                        "name": "outputTensor)" + std::to_string(i) + R"(",
                        "quantization": {
                            "min": [ 0.0 ],
                            "max": [ 255.0 ],
                            "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
                            "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
                        }
                    })";

            if (i + 1 < numOutputs)
            {
                m_JsonString += ",";
            }
        }

        const std::string inputIndices  = GetIndicesAsString(0u, numInputs - 1u);
        const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);

        // Add dummy custom operator
        m_JsonString += R"(],
                    "inputs": )" + inputIndices + R"(,
                    "outputs": )" + outputIndices + R"(,
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": )" + inputIndices + R"(,
                            "outputs": )" + outputIndices + R"(,
                            "builtin_options_type": 0,
                            "custom_options": [ ],
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { }
                ]
            }
        )";

        ReadStringToBinary();
    }

    void RunTest()
    {
        INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
        network->ExecuteStrategy(m_StandInLayerVerifier);
    }

private:
    static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
    {
        std::stringstream stream;
        stream << "[ ";
        for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
        {
            stream << tensorShape[i];
            if (i + 1 < tensorShape.GetNumDimensions())
            {
                stream << ",";
            }
            stream << " ";
        }
        stream << "]";

        return stream.str();
    }

    static std::string GetDataTypeAsString(DataType dataType)
    {
        switch (dataType)
        {
            case DataType::Float32:  return "FLOAT32";
            case DataType::QAsymmU8: return "UINT8";
            default:                 return "UNKNOWN";
        }
    }

    static std::string GetIndicesAsString(unsigned int first, unsigned int last)
    {
        std::stringstream stream;
        stream << "[ ";
        for (unsigned int i = first; i <= last; ++i)
        {
            stream << i;
            if (i + 1 <= last)
            {
                stream << ",";
            }
            stream << " ";
        }
        stream << "]";

        return stream.str();
    }

    StandInLayerVerifier m_StandInLayerVerifier;
};

class DummyCustom1Input1OutputFixture : public DummyCustomFixture
{
public:
    DummyCustom1Input1OutputFixture()
        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
                             { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
};

class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
{
public:
    DummyCustom2Inputs1OutputFixture()
        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
                             { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
};

TEST_CASE_FIXTURE(DummyCustom1Input1OutputFixture, "UnsupportedCustomOperator1Input1Output")
{
    RunTest();
}

TEST_CASE_FIXTURE(DummyCustom2Inputs1OutputFixture, "UnsupportedCustomOperator2Inputs1Output")
{
    RunTest();
}

}
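
The same IStrategy visitor mechanism the test uses is also how client code can detect unsupported operators after parsing a model. Below is a minimal sketch (not part of Unsupported.cpp; the class name StandInLayerCounter is made up for illustration) that counts the StandIn layers in a parsed network:

#include <armnn/INetwork.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/Types.hpp>

#include <vector>

// Counts the StandIn layers in a network; each one marks an operator the
// parser could not map to a native ArmNN layer.
class StandInLayerCounter : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& /*constants*/,
                         const char* /*name*/,
                         const armnn::LayerBindingId /*id*/ = 0) override
    {
        if (layer->GetType() == armnn::LayerType::StandIn)
        {
            ++m_Count;
        }
    }

    unsigned int GetCount() const { return m_Count; }

private:
    unsigned int m_Count = 0;
};

// Usage, given an armnn::INetworkPtr returned by a parser:
//     StandInLayerCounter counter;
//     network->ExecuteStrategy(counter);
//     const bool fullySupported = (counter.GetCount() == 0);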