25 #include <doctest/doctest.h> 27 using namespace armnn;
32 void CreateLSTMLayerHelper(
Graph &graph,
bool CifgEnabled)
43 unsigned int batchSize = 3;
44 unsigned int inputSize = 2;
45 unsigned int numUnits = 4;
46 unsigned int outputSize = 4;
77 if (!layerDesc.m_CifgEnabled)
90 if (layerDesc.m_ProjectionEnabled)
100 if (layerDesc.m_PeepholeEnabled)
102 if (!layerDesc.m_CifgEnabled)
129 armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
132 Connect(input, layer, lstmTensorInfo1, 0, 0);
133 Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
134 Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
135 Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
136 Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
137 Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
138 Connect(layer, output, lstmTensorInfo3, 3, 0);
146 const std::vector<TensorInfo>& infos,
161 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
162 reasonIfUnsupported);
189 template <
typename NamePolicy>
193 CustomAllocatorBackend() :
194 m_BackendCapabilities(NamePolicy::GetIdStatic(), {{
"NullCapability",
false}}),
195 m_CustomAllocator(
false) {};
197 m_BackendCapabilities(capabilities),
198 m_CustomAllocator(false) {};
199 ~CustomAllocatorBackend() =
default;
203 return NamePolicy::GetIdStatic();
207 return GetIdStatic();
228 return std::make_shared<MockLayerSupport>();
238 return m_BackendCapabilities;
241 virtual bool UseCustomMemoryAllocator(std::shared_ptr<ICustomAllocator> allocator,
245 m_CustomAllocator =
true;
246 return m_CustomAllocator;
250 bool m_CustomAllocator;
253 template <
typename NamePolicy>
257 NoProtectedModeMockBackend() : m_BackendCapabilities(NamePolicy::GetIdStatic(), {{
"NullCapability",
false}}) {};
258 NoProtectedModeMockBackend(
const BackendCapabilities& capabilities) : m_BackendCapabilities(capabilities) {};
259 ~NoProtectedModeMockBackend() =
default;
263 return NamePolicy::GetIdStatic();
267 return GetIdStatic();
288 return std::make_shared<MockLayerSupport>();
298 return m_BackendCapabilities;
310 TEST_CASE(
"LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
315 CreateLSTMLayerHelper(graph,
false);
321 TEST_CASE(
"LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
326 CreateLSTMLayerHelper(graph,
true);
332 TEST_CASE(
"InsertConvertersTest")
346 ->GetOutputHandler().SetTensorInfo(
info);
355 ->GetOutputHandler().SetTensorInfo(
info);
360 &IsLayerOfType<armnn::InputLayer>,
361 &IsLayerOfType<armnn::InputLayer>,
362 &IsLayerOfType<armnn::MemCopyLayer>,
363 &IsLayerOfType<armnn::FloorLayer>,
364 &IsLayerOfType<armnn::AdditionLayer>,
365 &IsLayerOfType<armnn::OutputLayer>));
368 for (
auto& layer : graph)
378 for (
auto& layer : graph)
388 for (
auto& layer : graph)
410 &IsLayerOfType<armnn::InputLayer>,
411 &IsLayerOfType<armnn::InputLayer>,
412 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
413 &IsLayerOfType<armnn::MemCopyLayer>,
414 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
415 &IsLayerOfType<armnn::FloorLayer>,
416 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
417 &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
418 &IsLayerOfType<armnn::AdditionLayer>,
419 &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
420 &IsLayerOfType<armnn::OutputLayer>));
423 void CreateConvolution2dGraph(
Graph &graph,
const unsigned int* inputShape,
424 const unsigned int* weightsShape,
const unsigned int* outputShape,
430 std::vector<float> weightsVector(90);
445 layer->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
weights);
453 TEST_CASE(
"Conv2dValidateTensorShapesFromInputs")
456 const unsigned int inputShape[] = { 1, 3, 8, 16 };
457 const unsigned int weightsShape[] = { 2, 3, 5, 3 };
458 const unsigned int outputShape[] = { 1, 2, 4, 14 };
459 CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
464 TEST_CASE(
"Conv2dValidateTensorShapesFromInputsNhwc")
467 const unsigned int inputShape[] = { 1, 8, 16, 3 };
468 const unsigned int weightsShape[] = { 2, 5, 3, 3 };
469 const unsigned int outputShape[] = { 1, 4, 14, 2 };
470 CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape,
DataLayout::NHWC);
475 void CreateDepthwiseConvolution2dGraph(
Graph &graph,
const unsigned int* inputShape,
476 const unsigned int* weightsShape,
const unsigned int* outputShape,
482 std::vector<float> weightsVector(18);
497 layer->
m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
weights);
505 TEST_CASE(
"DepthwiseConv2dValidateTensorShapesFromInputs")
508 const unsigned int inputShape[] = { 1, 2, 3, 3 };
509 const unsigned int weightsShape[] = { 1, 3, 3, 2 };
510 const unsigned int outputShape[] = { 1, 2, 1, 1 };
511 CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
516 TEST_CASE(
"DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
519 const unsigned int inputShape[] = { 1, 3, 3, 2 };
520 const unsigned int weightsShape[] = { 1, 3, 3, 2 };
521 const unsigned int outputShape[] = { 1, 1, 1, 2 };
522 CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape,
DataLayout::NHWC);
527 void CreatePooling2dGraph(
Graph& graph,
const unsigned int* inputShape,
const unsigned int* outputShape,
555 TEST_CASE(
"Pooling2dValidateTensorShapesFromInputs")
558 const unsigned int inputShape[] = { 5, 3, 52, 60 };
559 const unsigned int outputShape[] = { 5, 3, 11, 13 };
565 TEST_CASE(
"Pooling2dValidateTensorShapesFromInputsNhwc")
568 const unsigned int inputShape[] = { 5, 52, 60, 3 };
569 const unsigned int outputShape[] = { 5, 11, 13, 3 };
575 void CreateResizeBilinearGraph(
Graph& graph,
576 const unsigned int* inputShape,
577 const unsigned int* outputShape,
600 TEST_CASE(
"ResizeBilinearValidateTensorShapesFromInputs")
603 const unsigned int inputShape[] = { 1, 2, 4, 5 };
604 const unsigned int outputShape[] = { 1, 2, 3, 4 };
605 CreateResizeBilinearGraph(graph, inputShape, outputShape);
610 TEST_CASE(
"ResizeBilinearValidateTensorShapesFromInputsNhwc")
613 const unsigned int inputShape[] = { 1, 4, 5, 2 };
614 const unsigned int outputShape[] = { 1, 3, 4, 2 };
615 CreateResizeBilinearGraph(graph, inputShape, outputShape,
DataLayout::NHWC);
620 void CreateGatherGraph(
Graph& graph,
641 TEST_CASE(
"GatherValidateTensorShapesFromInputs")
648 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
653 TEST_CASE(
"GatherValidateTensorShapesFromInputs1DParams")
660 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
665 TEST_CASE(
"GatherValidateTensorShapesFromInputsMultiDimIndices")
672 CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
677 TEST_CASE(
"DetectionPostProcessValidateTensorShapes")
682 std::vector<uint8_t> anchorsVector(40);
706 input0->GetOutputSlot().Connect(layer->
GetInputSlot(0));
712 TEST_CASE(
"BackendCapabilityTest")
726 TEST_CASE(
"BackendHintTest")
728 class TestBackendAssignment :
public StrategyBase<NoThrowStrategy>
734 const std::vector<armnn::ConstTensor>& constants,
743 auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
744 const auto connectedLayerBackendId = inputLayer->GetOutputSlot(0).GetOwningLayer().GetBackendId();
745 CHECK((inputLayer->GetBackendId() == connectedLayerBackendId));
750 auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
751 CHECK((outputLayer->GetBackendId() ==
"MockBackend"));
756 auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
757 CHECK((activation->GetBackendId() ==
"CustomBackend"));
788 backendRegistry.Register(
"MockBackend", []() {
return std::make_unique<CustomAllocatorBackend<MockPolicy>>(); });
790 backendRegistry.Register(
"CustomBackend",
791 []() {
return std::make_unique<CustomAllocatorBackend<CustomPolicy>>(); });
798 std::unique_ptr<Graph> graph = std::make_unique<Graph>();
799 auto input = graph->AddLayer<
InputLayer>(0,
"input");
801 auto output = graph->AddLayer<
OutputLayer>(0,
"output");
803 BackendId customBackendId(
"CustomBackend");
814 std::vector<BackendId> prefs{
"MockBackend",
"CustomBackend" };
816 BackendIdSet availableBackends = {
"CustomBackend",
"MockBackend" };
834 TestBackendAssignment visitor;
835 for (
auto it = firstLayer; it != lastLayer; ++it)
837 (*it)->ExecuteStrategy(visitor);
840 backendRegistry.Deregister(
"MockBackend");
841 backendRegistry.Deregister(
"CustomBackend");
845 TEST_CASE(
"OptimizeForExclusiveConnectionsFuseTest")
847 using namespace armnn;
855 const unsigned int inputDimensionSizes[] = { 1, 4, 4, 3 };
856 const unsigned int weightsDimensionSizes[] = { 1, 2, 2, 3 };
857 const unsigned int outputDimensionSizes[] = { 1, 3, 3, 1 };
858 const unsigned int outputChannelSize[] = { outputDimensionSizes[3] };
863 std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
866 std::vector<float> betaVector = { 0.1f };
867 std::vector<float> gammaVector = { 0.5f };
868 std::vector<float> meanVector = { 0 };
869 std::vector<float> varianceVector = { 1 };
886 conv->m_Weight = std::make_unique<ScopedTensorHandle>(
weights);
887 batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(
beta);
888 batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(
gamma);
889 batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(
mean);
890 batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
891 if (convolution2dDescriptor.m_BiasEnabled)
893 std::vector<float> biasVector = { 11 };
895 conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
905 &IsLayerOfType<InputLayer>,
906 &IsLayerOfType<Convolution2dLayer>,
907 &IsLayerOfType<BatchNormalizationLayer>,
908 &IsLayerOfType<OutputLayer>));
913 auto checkFusedConv2d = [](
const armnn::Layer*
const layer) ->
bool {
914 return IsLayerOfType<armnn::Convolution2dLayer>(layer) &&
915 (layer->
GetNameStr() ==
"fused-batchNorm-into-convolution");
920 &IsLayerOfType<InputLayer>,
922 &IsLayerOfType<OutputLayer>));
926 TEST_CASE(
"OptimizeForExclusiveConnectionsWithoutFuseTest")
947 &IsLayerOfType<armnn::InputLayer>,
948 &IsLayerOfType<armnn::Convolution2dLayer>,
949 &IsLayerOfType<armnn::BatchNormalizationLayer>,
950 &IsLayerOfType<armnn::OutputLayer>,
951 &IsLayerOfType<armnn::OutputLayer>));
957 &IsLayerOfType<armnn::InputLayer>,
958 &IsLayerOfType<armnn::Convolution2dLayer>,
959 &IsLayerOfType<armnn::BatchNormalizationLayer>,
960 &IsLayerOfType<armnn::OutputLayer>,
961 &IsLayerOfType<armnn::OutputLayer>));
TEST_SUITE("TestConstTensorLayerVisitor")
std::shared_ptr< ConstTensorHandle > m_ForgetGateBias
A shared pointer to represent a 1D weights tensor with dimensions [num_units].
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
std::shared_ptr< ConstTensorHandle > m_OutputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
bool m_BiasEnabled
Enable/disable bias.
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
OptimizationResult AssignBackends(OptimizedNetworkImpl *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilites struct.
LstmBasicParameters m_BasicParameters
This layer represents a batch normalization operation.
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const TensorInfo const TensorInfo & anchors
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
std::shared_ptr< ConstTensorHandle > m_CellToForgetWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
uint32_t m_PadLeft
Padding left value in the width dimension.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
bool IsOutputSupported(const TensorInfo &, Optional< std::string &>) const override
float m_ClippingThresProj
Clipping threshold value for the projection.
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
std::unordered_set< BackendId > BackendIdSet
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability The BackendCapability must then be in...
This layer represents a depthwise convolution 2d operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
A Convolution2dDescriptor for the Convolution2dLayer.
int Connect(InputSlot &destination)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
static void Pass(Graph &graph, const Optimizations &optimizations)
The padding fields don't count and are ignored.
bool IsInputSupported(const TensorInfo &, Optional< std::string &>) const override
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store Weight values.
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
uint32_t m_PadTop
Padding top value in the height dimension.
std::shared_ptr< ConstTensorHandle > m_ProjectionWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
This layer represents a detection postprocess operator.
Copyright (c) 2021 ARM Limited and Contributors.
std::shared_ptr< ConstTensorHandle > m_InputToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
LayerList::const_iterator Iterator
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
std::shared_ptr< ConstTensorHandle > m_InputGateBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
Base class for all descriptors.
Strategy base class with empty implementations.
std::shared_ptr< ConstTensorHandle > m_CellToOutputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
The SubgraphView class represents a subgraph of a Graph.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_MaxDetections
Maximum numbers of detections.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::shared_ptr< ConstTensorHandle > m_RecurrentToCellWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
std::shared_ptr< ConstTensorHandle > m_CellBias
A unique pointer to represent 1D weights tensor with dimensions [num_units].
This layer represents a Gather operator.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
DataType GetDataType() const
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
std::shared_ptr< ConstTensorHandle > m_RecurrentToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
bool m_PeepholeEnabled
Enable/disable peephole.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
This layer represents a memory copy operation.
#define ARMNN_ASSERT(COND)
std::shared_ptr< ConstTensorHandle > m_RecurrentToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::shared_ptr< ConstTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
An ActivationDescriptor for the ActivationLayer.
This layer represents a floor operation.
std::shared_ptr< ConstTensorHandle > m_ProjectionBias
A unique pointer to represent 1D weights tensor with dimensions [output_size].
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
This layer represents a pooling 2d operation.
float m_ClippingThresCell
Clipping threshold value for the cell state.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
This layer represents an addition operation.
LstmOptPeepholeParameters m_PeepholeParameters
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
Struct for the users to pass backend specific options.
LstmOptProjectionParameters m_ProjectionParameters
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
std::shared_ptr< ConstTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::shared_ptr< ConstTensorHandle > m_RecurrentToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
void SetTensorInfo(const TensorInfo &tensorInfo) override
DataType GetDataType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LstmOptCifgParameters m_CifgParameters
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
const TensorInfo & input1
A Pooling2dDescriptor for the Pooling2dLayer.
std::shared_ptr< ConstTensorHandle > m_CellToInputWeights
A unique pointer to represent 1D weights tensor with dimensions [num_units].
size_t GetNumLayers() const
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::shared_ptr< ConstTensorHandle > m_Anchors
A unique pointer to store Anchor values.
const TensorInfo & GetTensorInfo() const override
static INetworkPtr Create(NetworkOptions networkOptions={})
const char * GetLayerTypeAsCString(LayerType type)
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
This layer represents a resize operation.
const TensorInfo const TensorInfo & mean
std::shared_ptr< ConstTensorHandle > m_InputToOutputWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
std::unique_ptr< IBackendContext > IBackendContextPtr