// Returns this backend's unique identifier. Pure virtual: each concrete
// backend implements it (per the member list below, "Each backend should
// implement an IBackend").
185 virtual const BackendId& GetId()
const = 0;
// Returns the set of backend ids available here (BackendIdSet is an
// std::unordered_set<BackendId>, see the member list below). Pure virtual.
// NOTE(review): presumably a member of the device-spec interface ("Device
// specific knowledge to be passed to the optimizer") — confirm in full header.
198 virtual const BackendIdSet& GetSupportedBackends()
const = 0;
209 using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
// Body fragment of PermutationVector::IsEqual (see member list below); the
// function header and trailing "return true" are not visible in this extract.
// Two permutations differ if their used lengths differ...
240 if (m_NumDimMappings != other.m_NumDimMappings)
return false;
// ...or if any mapping entry within the used length differs. Entries past
// m_NumDimMappings are deliberately not compared.
241 for (
unsigned int i = 0; i < m_NumDimMappings; ++i)
243 if (m_DimMappings[i] != other.m_DimMappings[i])
return false;
// Body fragment of PermutationVector::IsInverse (see member list below; the
// function header is not visible in this extract). 'other' is the inverse of
// this permutation iff the sizes match and composing the two mappings yields
// the identity: m_DimMappings[other.m_DimMappings[i]] == i for every i.
250 bool isInverse = (GetSize() == other.
GetSize());
// The "isInverse &&" in the loop condition short-circuits the scan as soon
// as one composed entry fails to map back to i.
251 for (
SizeType i = 0; isInverse && (i < GetSize()); ++i)
253 isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
275 using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
281 static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;
// Implicit conversion to the raw 64-bit guid value; this is what lets the
// std::hash specializations below hash a guid via hash<uint64_t>.
290 operator uint64_t()
const {
return m_Guid; }
// Bodies of the six ProfilingGuid comparison operators (==, !=, <, <=, >, >=;
// headers are not visible in this extract — see the member list below).
// Each simply delegates to the underlying uint64_t m_Guid value, giving guids
// a total order consistent with their numeric representation.
294 return m_Guid == other.
m_Guid;
299 return m_Guid != other.
m_Guid;
304 return m_Guid < other.
m_Guid;
309 return m_Guid <= other.
m_Guid;
314 return m_Guid > other.
m_Guid;
319 return m_Guid >= other.
m_Guid;
// std::hash specializations (struct bodies truncated in this extract) so that
// ProfilingGuid and its derived static/dynamic guid types can be used as keys
// in unordered containers. Each forwards to hash<uint64_t> through the guid's
// implicit uint64_t conversion, so all three hash identically for equal values.
346 struct hash<
armnn::profiling::ProfilingGuid>
350 return hash<uint64_t>()(uint64_t(guid));
356 struct hash<
armnn::profiling::ProfilingDynamicGuid>
360 return hash<uint64_t>()(uint64_t(guid));
366 struct hash<
armnn::profiling::ProfilingStaticGuid>
370 return hash<uint64_t>()(uint64_t(guid));
bool operator<(const ProfilingGuid &other) const
bool operator>=(const ProfilingGuid &other) const
bool operator==(const ProfilingGuid &other) const
std::unordered_set< BackendId > BackendIdSet
std::size_t operator()(armnn::profiling::ProfilingGuid const &guid) const noexcept
typename ArrayType::const_iterator ConstIterator
Each backend should implement an IBackend.
Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
The padding fields don't count and are ignored.
NormalizationAlgorithmChannel
Copyright (c) 2020 ARM Limited.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const &guid) const noexcept
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
PaddingMethod
The padding method modifies the output of pooling layers.
std::shared_ptr< IBackend > IBackendSharedPtr
bool operator<=(const ProfilingGuid &other) const
#define ARMNN_DEPRECATED_ENUM_MSG(message)
std::size_t operator()(armnn::profiling::ProfilingStaticGuid const &guid) const noexcept
std::array< ValueType, MaxNumOfTensorDimensions > ArrayType
Validate all output shapes.
Device specific knowledge to be passed to the optimizer.
constexpr unsigned int LOWEST_CAPTURE_PERIOD
The lowest performance data capture interval we support is 10 milliseconds.
std::unique_ptr< IBackend, void(*)(IBackend *backend)> IBackendUniquePtr
min(a, max(b, input)); used to implement bounded activations such as ReLU1 and ReLU6.
ValueType operator[](SizeType i) const
bool IsEqual(const PermutationVector &other) const
The padding fields count, but are ignored.
ProfilingGuid(uint64_t guid)
bool operator!=(const ProfilingGuid &other) const
Jarrett 2009: Local Contrast Normalization.
ConstIterator begin() const
ConstIterator end() const
Infer missing output shapes and validate all output shapes.
Krichevsky 2012: Local Brightness Normalization.
bool IsInverse(const PermutationVector &other) const
NormalizationAlgorithmMethod
bool operator>(const ProfilingGuid &other) const
ShapeInferenceMethod
The ShapeInferenceMethod modifies how the output shapes are treated.
constexpr unsigned int MaxNumOfTensorDimensions