virtual const BackendId& GetId() const = 0;
virtual const BackendIdSet& GetSupportedBackends() const = 0;
using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
if (m_NumDimMappings != other.m_NumDimMappings) return false;
for (unsigned int i = 0; i < m_NumDimMappings; ++i)
    if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
bool isInverse = (GetSize() == other.GetSize());
for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
    isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
namespace profiling { class ProfilingGuid; }
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
#define LIST_OF_LAYER_TYPE \
    X(BatchNormalization) \
    X(ConvertBf16ToFp32) \
    X(ConvertFp16ToFp32) \
    X(ConvertFp32ToBf16) \
    X(ConvertFp32ToFp16) \
    X(DepthwiseConvolution2d) \
    X(DetectionPostProcess) \
    X(ElementwiseUnary) \
    X(FakeQuantization) \
    X(InstanceNormalization) \
    X(TransposeConvolution2d) \
    X(UnidirectionalSequenceLstm)

#define X(name) name,
std::chrono::high_resolution_clock::time_point HighResolutionClock
Define a timer and associated inference ID for recording execution times.
constexpr unsigned int EXPIRE_RATE
Variable to control expire rate of priority queue.
std::unordered_set< BackendId > BackendIdSet
typename ArrayType::const_iterator ConstIterator
Each backend should implement an IBackend.
The padding fields don't count and are ignored.
#define LIST_OF_LAYER_TYPE
This list uses X macro technique.
NormalizationAlgorithmChannel
Copyright (c) 2021 ARM Limited and Contributors.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
PaddingMethod
The padding method modifies the output of pooling layers.
Constant weights can be accessed through the descriptors. On the other hand, non-const weights can be...
std::shared_ptr< IBackend > IBackendSharedPtr
#define ARMNN_DEPRECATED_ENUM_MSG(message)
BackendCapability
BackendCapability class.
std::array< ValueType, MaxNumOfTensorDimensions > ArrayType
Validate all output shapes.
Device specific knowledge to be passed to the optimizer.
constexpr unsigned int LOWEST_CAPTURE_PERIOD
The lowest performance data capture interval we support is 10 milliseconds.
std::unique_ptr< IBackend, void(*)(IBackend *backend)> IBackendUniquePtr
min(a, max(b, input)) ReLu1 & ReLu6.
ValueType operator[](SizeType i) const
bool IsEqual(const PermutationVector &other) const
The padding fields count, but are ignored.
MemorySource
Define the Memory Source to reduce copies.
Jarrett 2009: Local Contrast Normalization.
ConstIterator begin() const
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
ConstIterator end() const
Infer missing output shapes and validate all output shapes.
Krizhevsky 2012: Local Brightness Normalization.
bool IsInverse(const PermutationVector &other) const
NormalizationAlgorithmMethod
ShapeInferenceMethod
The ShapeInferenceMethod modifies how the output shapes are treated.
const char * GetLayerTypeAsCString(LayerType type)
std::pair< HighResolutionClock, HighResolutionClock > InferenceTimingPair
constexpr unsigned int MaxNumOfTensorDimensions
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...