ArmNN
 24.02
LayerSupportRules.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
9 #include <algorithm>
10 
11 namespace armnn
12 {
13 
15 {
16  if (!weightsType)
17  {
18  return weightsType;
19  }
20 
21  switch(weightsType.value())
22  {
25  return weightsType;
31  default:
32  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
33  }
34  return armnn::EmptyOptional();
35 }
36 
37 template<typename F>
38 bool CheckSupportRule(F rule, Optional<std::string&> reasonIfUnsupported, const char* reason)
39 {
40  bool supported = rule();
41  if (!supported && reason)
42  {
43  reasonIfUnsupported.value() += std::string(reason) + "\n"; // Append the reason on a new line
44  }
45  return supported;
46 }
47 
/// Base class for the layer-support rule functors below. A derived rule
/// computes its verdict in its constructor and stores it in m_Res;
/// operator() then just reports that cached result.
struct Rule
{
    bool operator()() const
    {
        return m_Res;
    }

    // Verdict; defaults to true so derived rules only have to record failures.
    bool m_Res = true;
};
57 
/// Recursion terminator for AllTypesAreEqualImpl: a single remaining
/// tensor is trivially "equal" to itself.
template<typename T>
bool AllTypesAreEqualImpl(T)
{
    return true;
}
63 
/// Recursively checks that every TensorInfo in the pack shares the same
/// DataType. Comparison is pairwise: t1 vs t2, then t2 against the rest.
template<typename T, typename... Rest>
bool AllTypesAreEqualImpl(T t1, T t2, Rest... rest)
{
    // Only TensorInfo arguments are supported.
    static_assert(std::is_same<T, TensorInfo>::value, "Type T must be a TensorInfo");

    return (t1.GetDataType() == t2.GetDataType()) && AllTypesAreEqualImpl(t2, rest...);
}
71 
/// Rule: passes when all the given TensorInfos have the same DataType.
struct TypesAreEqual : public Rule
{
    template<typename ... Ts>
    TypesAreEqual(const Ts&... ts)
    {
        m_Res = AllTypesAreEqualImpl(ts...);
    }
};
80 
82 {
84  {
85  m_Res = info0.GetQuantizationScale() == info1.GetQuantizationScale() &&
87  }
88 };
89 
90 struct TypeAnyOf : public Rule
91 {
92  template<typename Container>
93  TypeAnyOf(const TensorInfo& info, const Container& c)
94  {
95  m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
96  {
97  return dt == info.GetDataType();
98  });
99  }
100 };
101 
102 struct TypeIs : public Rule
103 {
104  TypeIs(const TensorInfo& info, DataType dt)
105  {
106  m_Res = dt == info.GetDataType();
107  }
108 };
109 
111 {
113  {
114  m_Res = !info.IsQuantized() || !info.HasPerAxisQuantization();
115  }
116 };
117 
119 {
120  BiasAndWeightsTypesMatch(const TensorInfo& biases, const TensorInfo& weights)
121  {
122  m_Res = biases.GetDataType() == GetBiasTypeFromWeightsType(weights.GetDataType()).value();
123  }
124 };
125 
127 {
128  template<typename Container>
129  BiasAndWeightsTypesCompatible(const TensorInfo& info, const Container& c)
130  {
131  m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
132  {
133  return dt == GetBiasTypeFromWeightsType(info.GetDataType()).value();
134  });
135  }
136 };
137 
138 struct ShapesAreSameRank : public Rule
139 {
140  ShapesAreSameRank(const TensorInfo& info0, const TensorInfo& info1)
141  {
142  m_Res = info0.GetShape().GetNumDimensions() == info1.GetShape().GetNumDimensions();
143  }
144 };
145 
147 {
148  ShapesAreSameTotalSize(const TensorInfo& info0, const TensorInfo& info1)
149  {
150  m_Res = info0.GetNumElements() == info1.GetNumElements();
151  }
152 };
153 
155 {
156  unsigned int CalcInputSize(const TensorShape& in, const TensorShape& out, unsigned int idx)
157  {
158  unsigned int offset = out.GetNumDimensions() - in.GetNumDimensions();
159  unsigned int sizeIn = (idx < offset) ? 1 : in[idx-offset];
160  return sizeIn;
161  }
162 
163  ShapesAreBroadcastCompatible(const TensorInfo& in0, const TensorInfo& in1, const TensorInfo& out)
164  {
165  const TensorShape& shape0 = in0.GetShape();
166  const TensorShape& shape1 = in1.GetShape();
167  const TensorShape& outShape = out.GetShape();
168 
169  for (unsigned int i=0; i < outShape.GetNumDimensions() && m_Res; i++)
170  {
171  unsigned int sizeOut = outShape[i];
172  unsigned int sizeIn0 = CalcInputSize(shape0, outShape, i);
173  unsigned int sizeIn1 = CalcInputSize(shape1, outShape, i);
174 
175  m_Res &= ((sizeIn0 == sizeOut) || (sizeIn0 == 1)) &&
176  ((sizeIn1 == sizeOut) || (sizeIn1 == 1));
177  }
178  }
179 };
180 
182 {
183  TensorNumDimensionsAreCorrect(const TensorInfo& info, unsigned int expectedNumDimensions)
184  {
185  m_Res = info.GetNumDimensions() == expectedNumDimensions;
186  }
187 };
188 
190 {
191  TensorNumDimensionsAreGreaterOrEqualTo(const TensorInfo& info, unsigned int numDimensionsToCompare)
192  {
193  m_Res = info.GetNumDimensions() >= numDimensionsToCompare;
194  }
195 };
196 
197 } //namespace armnn
armnn::Rule::m_Res
bool m_Res
Definition: LayerSupportRules.hpp:55
armnn::TensorInfo::GetNumElements
unsigned int GetNumElements() const
Definition: Tensor.hpp:198
armnn::Rule::operator()
bool operator()() const
Definition: LayerSupportRules.hpp:50
armnn::Optional
Definition: Optional.hpp:270
armnn::ShapesAreSameRank::ShapesAreSameRank
ShapesAreSameRank(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:140
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:118
armnn::BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch
BiasAndWeightsTypesMatch(const TensorInfo &biases, const TensorInfo &weights)
Definition: LayerSupportRules.hpp:120
armnn::Rule
Definition: LayerSupportRules.hpp:48
armnn::TensorNumDimensionsAreGreaterOrEqualTo::TensorNumDimensionsAreGreaterOrEqualTo
TensorNumDimensionsAreGreaterOrEqualTo(const TensorInfo &info, unsigned int numDimensionsToCompare)
Definition: LayerSupportRules.hpp:191
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::DataType::Float32
@ Float32
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:14
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::DataType::QSymmS8
@ QSymmS8
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::QuantizationParametersAreEqual::QuantizationParametersAreEqual
QuantizationParametersAreEqual(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:83
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:138
Assert.hpp
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:110
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect
TensorNumDimensionsAreCorrect(const TensorInfo &info, unsigned int expectedNumDimensions)
Definition: LayerSupportRules.hpp:183
armnn::TypesAreEqual::TypesAreEqual
TypesAreEqual(const Ts &... ts)
Definition: LayerSupportRules.hpp:75
armnn::DataType::Float16
@ Float16
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:154
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:181
armnn::BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible
BiasAndWeightsTypesCompatible(const TensorInfo &info, const Container &c)
Definition: LayerSupportRules.hpp:129
armnn::BoostLogSeverityMapping::info
@ info
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::AllTypesAreEqualImpl
bool AllTypesAreEqualImpl(T)
Definition: LayerSupportRules.hpp:59
armnn::TypeIs
Definition: LayerSupportRules.hpp:102
armnn::DataType::Signed32
@ Signed32
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:126
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::ShapesAreBroadcastCompatible::CalcInputSize
unsigned int CalcInputSize(const TensorShape &in, const TensorShape &out, unsigned int idx)
Definition: LayerSupportRules.hpp:156
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:72
armnn::ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible
ShapesAreBroadcastCompatible(const TensorInfo &in0, const TensorInfo &in1, const TensorInfo &out)
Definition: LayerSupportRules.hpp:163
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:90
armnn::TypeNotPerAxisQuantized::TypeNotPerAxisQuantized
TypeNotPerAxisQuantized(const TensorInfo &info)
Definition: LayerSupportRules.hpp:112
armnn::ShapesAreSameTotalSize::ShapesAreSameTotalSize
ShapesAreSameTotalSize(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:148
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::QuantizationParametersAreEqual
Definition: LayerSupportRules.hpp:81
armnn::TypeAnyOf::TypeAnyOf
TypeAnyOf(const TensorInfo &info, const Container &c)
Definition: LayerSupportRules.hpp:93
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:146
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:189
armnn::TypeIs::TypeIs
TypeIs(const TensorInfo &info, DataType dt)
Definition: LayerSupportRules.hpp:104