// armnn::IBackend
virtual const BackendId& GetId() const = 0;

// armnn::IDeviceSpec
virtual const BackendIdSet& GetSupportedBackends() const = 0;
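A minimal sketch of querying these from application code. It assumes IRuntime::Create and IRuntime::GetDeviceSpec as the entry points; verify against the headers of your Arm NN version:

#include <armnn/ArmNN.hpp>
#include <iostream>

int main()
{
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

    // The device spec reports which backends this runtime can execute on
    // (e.g. CpuRef, CpuAcc, GpuAcc).
    const armnn::IDeviceSpec& spec = runtime->GetDeviceSpec();
    for (const armnn::BackendId& id : spec.GetSupportedBackends())
    {
        std::cout << id.Get() << std::endl;
    }
    return 0;
}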
// armnn::PermutationVector
using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;

bool IsEqual(const PermutationVector& other) const
{
    if (m_NumDimMappings != other.m_NumDimMappings) return false;
    for (unsigned int i = 0; i < m_NumDimMappings; ++i)
    {
        if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
    }
    return true;
}

// Two mappings are inverse when applying one after the other returns
// every dimension to its original position.
bool IsInverse(const PermutationVector& other) const
{
    bool isInverse = (GetSize() == other.GetSize());
    for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
    {
        isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
    }
    return isInverse;
}
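For illustration, a small usage sketch using the classic NCHW/NHWC layout swap. It assumes PermutationVector's initializer-list constructor and the convention that entry i gives the destination position of source dimension i:

#include <armnn/Types.hpp>
#include <cassert>

int main()
{
    armnn::PermutationVector nchwToNhwc({0, 3, 1, 2}); // N->0, C->3, H->1, W->2
    armnn::PermutationVector nhwcToNchw({0, 2, 3, 1}); // N->0, H->2, W->3, C->1

    // Different mappings, so not equal.
    assert(!nchwToNhwc.IsEqual(nhwcToNchw));

    // Composing the two sends every dimension back to where it started,
    // so each is the inverse of the other.
    assert(nchwToNhwc.IsInverse(nhwcToNchw));
    return 0;
}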
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
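A callback of this type receives the guid of the layer that produced a tensor, the index of the producing output slot, and a handle to the tensor. A sketch, assuming IRuntime::RegisterDebugCallback as the registration point (check your version's IRuntime.hpp):

#include <armnn/IRuntime.hpp>
#include <iostream>

void AttachDebugLogging(armnn::IRuntime& runtime, armnn::NetworkId netId)
{
    armnn::DebugCallbackFunction logTensor =
        [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
    {
        // Log which layer/slot just produced an output tensor.
        std::cout << "output ready: layer " << uint64_t(guid)
                  << ", slot " << slotIndex << std::endl;
    };

    runtime.RegisterDebugCallback(netId, logTensor);
}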
// armnn::profiling
static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

// ProfilingGuid: comparisons and conversion operate on the raw 64-bit value.
operator uint64_t() const { return m_Guid; }

bool operator==(const ProfilingGuid& other) const { return m_Guid == other.m_Guid; }
bool operator!=(const ProfilingGuid& other) const { return m_Guid != other.m_Guid; }
bool operator<(const ProfilingGuid& other) const  { return m_Guid <  other.m_Guid; }
bool operator<=(const ProfilingGuid& other) const { return m_Guid <= other.m_Guid; }
bool operator>(const ProfilingGuid& other) const  { return m_Guid >  other.m_Guid; }
bool operator>=(const ProfilingGuid& other) const { return m_Guid >= other.m_Guid; }
namespace std
{
// Make the guid types usable as keys in unordered containers.
template <> struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    { return hash<uint64_t>()(uint64_t(guid)); }
};
template <> struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    { return hash<uint64_t>()(uint64_t(guid)); }
};
template <> struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    { return hash<uint64_t>()(uint64_t(guid)); }
};
} // namespace std
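These specialisations hash a guid as its raw uint64_t value, which is what lets the guid types key standard unordered containers. A small sketch (the armnn/Types.hpp header location of ProfilingGuid is an assumption):

#include <armnn/Types.hpp>
#include <string>
#include <unordered_map>

int main()
{
    using armnn::profiling::ProfilingGuid;

    std::unordered_map<ProfilingGuid, std::string> layerNames;
    layerNames[ProfilingGuid(1)] = "input";
    layerNames[ProfilingGuid(2)] = "conv1";

    // operator uint64_t() also allows comparison against raw values.
    uint64_t raw = layerNames.find(ProfilingGuid(2))->first;
    return raw == 2 ? 0 : 1;
}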
Member summary

Classes:
    IBackend
        Each backend should implement an IBackend.
    IDeviceSpec
        Device specific knowledge to be passed to the optimizer.
    ProfilingGuid, ProfilingDynamicGuid, ProfilingStaticGuid
        Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.

Typedefs:
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>
    using ConstIterator = typename ArrayType::const_iterator
    using BackendIdSet = std::unordered_set<BackendId>
    using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>
    using IBackendSharedPtr = std::shared_ptr<IBackend>
    using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>
    using LayerBindingId = int
        Type of identifiers for bindable layers (inputs, outputs); see the second sketch after this summary.

Constants and macros:
    constexpr unsigned int MaxNumOfTensorDimensions
    constexpr unsigned int LOWEST_CAPTURE_PERIOD
    #define ARMNN_DEPRECATED_ENUM_MSG(message)

Enumerations:
    NormalizationAlgorithmChannel
    NormalizationAlgorithmMethod
        LocalBrightness: Krichevsky 2012: Local Brightness Normalization.
        LocalContrast: Jarret 2009: Local Contrast Normalization.
    PaddingMethod (see the first sketch after this summary)
        IgnoreValue: The padding fields count, but are ignored.
        Exclude: The padding fields don't count and are ignored.

PermutationVector members:
    ValueType operator[](SizeType i) const
    ConstIterator begin() const
    ConstIterator end() const
    bool IsEqual(const PermutationVector& other) const
    bool IsInverse(const PermutationVector& other) const

ProfilingGuid members:
    ProfilingGuid(uint64_t guid)
    bool operator==(const ProfilingGuid& other) const
    bool operator!=(const ProfilingGuid& other) const
    bool operator<(const ProfilingGuid& other) const
    bool operator<=(const ProfilingGuid& other) const
    bool operator>(const ProfilingGuid& other) const
    bool operator>=(const ProfilingGuid& other) const

std::hash specializations:
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
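The two PaddingMethod briefs describe how padding fields enter the divisor of average pooling: the padded value itself never contributes, but the field may or may not be counted. A hypothetical, simplified illustration in plain C++ (not Arm NN API): a pooling window of size 3 sits at a tensor edge and covers the values {4, 8} plus one padding field.

#include <iostream>

int main()
{
    // The padding field's value is ignored under both methods,
    // so it never contributes to the sum.
    float sum = 4.0f + 8.0f;

    float ignoreValue = sum / 3.0f; // IgnoreValue: padding counts in the divisor -> 4
    float exclude     = sum / 2.0f; // Exclude: padding left out of the divisor   -> 6

    std::cout << ignoreValue << " vs " << exclude << std::endl;
    return 0;
}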
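And a minimal sketch of where LayerBindingId appears when running inference: each input and output buffer is paired with the binding id of its bindable layer. The binding id 0 and the IRuntime calls shown (GetInputTensorInfo, GetOutputTensorInfo, EnqueueWorkload) are assumptions for illustration; verify them against your Arm NN version.

#include <armnn/ArmNN.hpp>
#include <vector>

void RunOnce(armnn::IRuntime& runtime, armnn::NetworkId netId)
{
    std::vector<float> inputData(4, 1.0f);
    std::vector<float> outputData(4, 0.0f);

    // A LayerBindingId (here: 0) ties each buffer to an input/output layer.
    armnn::InputTensors inputs{
        { 0, armnn::ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data()) }
    };
    armnn::OutputTensors outputs{
        { 0, armnn::Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data()) }
    };

    runtime.EnqueueWorkload(netId, inputs, outputs);
}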