19 using namespace armnn;
// Per-dimension "dimension specified" masks applied to a 4-D output
// TensorShape during the shape-inference tests.
// Row 0: no dimension marked as specified; rows 1-4: exactly one of the four
// dimensions marked (dimension 0..3 respectively); row 5: all four marked.
// NOTE(review): the extraction had stray line-number tokens embedded in this
// initializer; they are removed here — the boolean values are unchanged.
constexpr const bool maskPermutations[6][4] = {{false, false, false, false},
                                               {true,  false, false, false},
                                               {false, true,  false, false},
                                               {false, false, true,  false},
                                               {false, false, false, true},
                                               {true,  true,  true,  true}};
// BuildGraph: adds a layer of type LayerT to `graph` (forwarding `args` to its
// constructor), then creates one InputLayer per entry in `inputShapes` and
// connects each InputLayer's output slot to the corresponding input slot of
// the new layer.
// NOTE(review): this extract is incomplete — the function braces, the
// definition of `inputTensorInfo` (used below, presumably built from
// `inputShape`), the increment of `inputCount` and the `return layer;`
// statement are not visible here; confirm against the original file.
30 template<
typename LayerT,
typename... Args>
31 LayerT* BuildGraph(
Graph* graph,
const std::vector<TensorShape>& inputShapes, Args &&... args)
33 auto layer = graph->
AddLayer<LayerT>(std::forward<Args>(args)...);
// One InputLayer is created per requested input shape; slot index follows
// the running inputCount.
35 uint32_t inputCount = 0;
36 for (
auto inputShape : inputShapes)
40 auto input = graph->AddLayer<
InputLayer>(
static_cast<int>(inputCount),
"input");
// `inputTensorInfo` is set on the input's single output slot, then wired
// into the layer under test at the matching input slot.
41 input->GetOutputSlot().SetTensorInfo(inputTensorInfo);
42 input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount));
// RunShapeInferenceTest: given a layer whose inputs are already wired up,
// seeds its output slots with partially-specified TensorShapes (controlled by
// the maskPermutations rows) and checks that
// ValidateTensorShapesFromInputs() produces the expected output shape(s).
// NOTE(review): incomplete extract — braces, the ShapeInferenceMethod
// arguments normally passed to SetTensorInfo/ValidateTensorShapesFromInputs,
// and several statements are missing from this view.
49 template<
typename LayerT>
50 void RunShapeInferenceTest(LayerT*
const layer,
51 const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists)
// Expected output rank and shape per output slot, derived from the caller's
// dimension size lists.
53 std::vector<unsigned int> numDimensions;
54 std::vector<TensorShape> expectedOutputShapes;
56 for (
auto dimensionSizeList : dimensionSizeLists)
58 numDimensions.emplace_back(dimensionSizeList.size());
59 expectedOutputShapes.emplace_back(
TensorShape(dimensionSizeList));
62 const unsigned int outputSize = layer->GetNumOutputSlots();
// Helper: set each output slot's TensorInfo with the given per-dimension
// "specified" mask, run shape validation, then CHECK the inferred shapes.
64 const auto runTestWithMask = [&](
const bool maskPermutations[])
66 for (
unsigned int i = 0; i < outputSize; ++i)
68 layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations},
72 layer->ValidateTensorShapesFromInputs();
74 for (
unsigned int i = 0; i < outputSize; ++i)
76 CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
// NOTE(review): a second validation pass over the output slots follows here
// in the original; the statements between these loops are missing from this
// extract.
81 for (
unsigned int j = 0; j < outputSize; ++j)
91 layer->ValidateTensorShapesFromInputs();
93 for (
unsigned int i = 0; i < outputSize; ++i)
95 CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
// Exercise each single-dimension mask (rows 0..rank-1), then the
// all-dimensions-specified mask (row 5).
99 for (
unsigned int i = 0; i < numDimensions[0]; ++i)
101 runTestWithMask(maskPermutations[i]);
105 runTestWithMask(maskPermutations[5]);
// CreateGraphAndRunTest: convenience wrapper — builds a graph containing a
// LayerT fed by InputLayers for `inputShapes`, then runs the shape-inference
// checks against `dimensionSizeLists`.
// NOTE(review): incomplete extract — the `Graph graph;` local, the trailing
// `Args&&... args` parameter and the function braces are missing from this
// view.
108 template<
typename LayerT,
typename... Args>
109 void CreateGraphAndRunTest(
const std::vector<TensorShape>& inputShapes,
110 const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
115 auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
117 RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
// NetworkOptionsTest: verifies that the "ShapeInferenceMethod" backend option
// ("InferAndValidate" true/false) controls whether output TensorInfos are
// inferred when a network is built via the INetwork API.
// NOTE(review): incomplete extract — the network creation, `tensorInfo` and
// `descriptor` definitions, and the BackendOptions construction for the first
// case are missing from this view.
120 TEST_CASE(
"NetworkOptionsTest")
// First case: "InferAndValidate" = true — the activation layer's output
// TensorInfo is expected to be inferred and equal to the input's.
124 {
"InferAndValidate",
true }
130 auto inputLayer = network->AddInputLayer(1,
"inputLayer");
131 inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
135 auto activationLayer = network->AddActivationLayer(descriptor,
"activation");
137 inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
140 CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
142 CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
// Second case: "InferAndValidate" = false — inference disabled.
145 ShapeInferenceMethodOption =
BackendOptions(
"ShapeInferenceMethod",
147 {
"InferAndValidate",
false }
152 inputLayer = network->AddInputLayer(1,
"inputLayer");
153 inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
155 activationLayer = network->AddActivationLayer(descriptor,
"activation");
157 inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
160 CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
// Third case: network rebuilt (options not visible in this extract) and the
// same wiring is checked again.
164 inputLayer = network->AddInputLayer(1,
"inputLayer");
165 inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
167 activationLayer = network->AddActivationLayer(descriptor,
"activation");
169 inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
172 CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
// Per-layer shape-inference TEST_CASEs: each passes input shape(s), the
// expected output shape(s), an optional descriptor and a layer name to
// CreateGraphAndRunTest. NOTE(review): incomplete extract — TEST_CASE braces
// and most descriptor setups are missing from this view.
// (Belongs to an ActivationTest TEST_CASE whose header is not visible here.)
179 CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor,
"activation");
182 TEST_CASE(
"AdditionTest")
184 CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"add");
// ArgMinMax reduces one axis: 4-D input -> 3-D output.
187 TEST_CASE(
"ArgMinMaxTest")
193 CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor,
"argMinMax");
196 TEST_CASE(
"BatchNormalizationTest")
199 CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor,
"batchNorm");
202 TEST_CASE(
"BatchToSpaceNdTest")
206 std::vector<unsigned int> blockShape {2, 2};
207 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
213 CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor,
"batchtospacend");
// NOTE(review): "ComparisionTest" misspells "Comparison" in the original
// test name (a runtime string, left unchanged here).
216 TEST_CASE(
"ComparisionTest")
220 CreateGraphAndRunTest<ComparisonLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }},
226 TEST_CASE(
"ConcatTest")
230 descriptor.SetViewOriginCoord(0, 0, 0);
231 descriptor.SetViewOriginCoord(1, 0, 1);
233 CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor,
"concat");
// ConstantTest drives the layer directly: a constant layer has no inputs, so
// its output shape comes from m_LayerOutput.
236 TEST_CASE(
"ConstantTest")
240 auto layer = BuildGraph<ConstantLayer>(&graph, {},
"constant");
242 const float Datum = 0.0f;
244 layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
248 layer->ValidateTensorShapesFromInputs();
250 CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
// NOTE(review): the four Convert* tests below all name their layer "floor" —
// an apparent copy-paste from FloorTest; behavior is unaffected.
253 TEST_CASE(
"ConvertBf16ToFp32Test")
255 CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"floor");
// NOTE(review): test named "ConvertFp16ToBf16Test" but it exercises
// ConvertFp32ToBf16Layer — confirm against the original file.
258 TEST_CASE(
"ConvertFp16ToBf16Test")
261 CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"floor");
264 TEST_CASE(
"ConvertFp16ToFp32Test")
266 CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"floor");
269 TEST_CASE(
"ConvertFp32ToFp16Test")
271 CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"floor");
// Convolution-style layers need weights set on the layer before shape
// validation, so they use BuildGraph + RunShapeInferenceTest directly.
// NOTE(review): incomplete extract — descriptor setup, input shapes and the
// `weights` TensorInfo construction are missing from this view.
274 TEST_CASE(
"Convolution2dTest")
291 auto layer = BuildGraph<Convolution2dLayer>(&graph,
296 const float Datum = 0.0f;
298 layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
300 RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
303 TEST_CASE(
"DebugLayerTest")
306 CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"debug");
307 // (pass-through layer: output shape equals input shape)
309 TEST_CASE(
"DepthToSpaceTest")
316 CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor,
"depthtospace");
319 TEST_CASE(
"DepthwiseConvolutionTest")
336 auto layer = BuildGraph<DepthwiseConvolution2dLayer>(&graph,
341 const float Datum = 0.0f;
343 layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
345 RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
348 TEST_CASE(
"DequantizeTest")
351 CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"dequantize");
// DetectionPostProcess: two inputs (boxes, scores), four outputs; anchors
// must be set on the layer before validation.
354 TEST_CASE(
"DetectionPostProcessTest")
373 const float Datum = 0.0f;
378 auto layer = BuildGraph<DetectionPostProcessLayer>(&graph,
379 {detectionBoxesInfo, detectionScoresInfo},
381 "detectionpostprocess");
383 layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
385 RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
// NOTE(review): m_Max and m_Min are both set to 1 here — presumably
// intentional for this test, but an unusual (empty) quantization range.
388 TEST_CASE(
"FakeQuantizationTest")
391 descriptor.
m_Max = 1;
392 descriptor.
m_Min = 1;
393 CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor,
"fakequantization");
396 TEST_CASE(
"FloorTest")
399 CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"floor");
// FullyConnected: weights shape {inputChannels, outputChannels} supplied as a
// second graph input; output is { 1, outputChannels }.
402 TEST_CASE(
"FullyConnectedTest")
404 const unsigned int inputWidth = 3u;
405 const unsigned int inputHeight = 2u;
406 const unsigned int inputChannels = 1u;
407 const unsigned int outputChannels = 2u;
409 CreateGraphAndRunTest<FullyConnectedLayer>({{ 1, inputChannels, inputHeight, inputWidth },
410 { inputChannels, outputChannels }},
411 {{ 1, outputChannels }},
416 TEST_CASE(
"GatherTest")
418 CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }},
GatherDescriptor(),
"gather");
// More pass-through / element-wise layer tests: output shape equals input
// shape. NOTE(review): incomplete extract — TEST_CASE braces, descriptor
// setups and some trailing arguments are missing from this view.
421 TEST_CASE(
"InstanceNormalizationTest")
425 CreateGraphAndRunTest<InstanceNormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
430 TEST_CASE(
"L2NormalizationTest")
434 CreateGraphAndRunTest<L2NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
439 TEST_CASE(
"LogSoftMaxTest")
443 CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
LogSoftmaxDescriptor(),
"logsoftmax");
// Lstm: three inputs (input, output-state-in, cell-state-in) and four
// outputs; all weight/bias parameters must be populated before validation.
446 TEST_CASE(
"LstmTest")
460 auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor,
"lstm");
465 layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
466 layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
467 layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
468 layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
469 layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
470 layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
471 layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
472 layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
473 layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
474 layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
475 layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
476 layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
478 RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
// Mean reduces an axis: 4-D input -> 3-D output.
481 TEST_CASE(
"MeanLayerTest")
486 CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor,
"mean");
489 TEST_CASE(
"MemCopyTest")
491 CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"memcopy");
// NOTE(review): "memomport" misspells "memimport" in the original layer name
// (a runtime string, left unchanged here).
494 TEST_CASE(
"MemImportTest")
496 CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"memomport");
499 TEST_CASE(
"MergeTest")
502 CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }},
"merge");
505 TEST_CASE(
"NormalizationTest")
509 CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
NormalizationDescriptor(),
"l2norm");
512 TEST_CASE(
"PermuteTest")
517 CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor,
"permute");
520 TEST_CASE(
"Pooling2dTest")
532 CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor,
"pooling2d");
// QLstm: like LstmTest but for the quantized QLstmLayer (three inputs, three
// outputs). NOTE(review): incomplete extract — descriptor/shape/constTensor
// definitions and TEST_CASE braces are missing from this view.
535 TEST_CASE(
"QLstmTest")
548 auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor,
"qlstm");
553 layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
554 layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
555 layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
556 layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
557 layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
558 layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
559 layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
560 layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
561 layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
562 layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
563 layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
564 layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
566 RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
// QuantizedLstm: all parameters live in m_QuantizedLstmParameters.
// NOTE(review): "quatizedlstm" misspells "quantizedlstm" in the original
// layer name (a runtime string, left unchanged here).
569 TEST_CASE(
"QuantizedLstmTest")
576 auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState},
"quatizedlstm");
581 layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
582 layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
583 layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
584 layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
585 layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
586 layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
587 layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
588 layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
589 layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
590 layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
591 layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
592 layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
594 RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
// NOTE(review): the quantize layer is named "mean" — apparent copy-paste
// from MeanLayerTest; behavior is unaffected.
597 TEST_CASE(
"QuantizeTest")
600 CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }},
"mean");
// Rank: output is always a scalar regardless of input shape; the test
// validates twice (statements between the checks are missing from this
// extract).
603 TEST_CASE(
"RankTest")
609 auto layer = BuildGraph<RankLayer>(&graph, {{ 1, 1, 1, 1 }},
"rank");
618 layer->ValidateTensorShapesFromInputs();
620 CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
624 layer->ValidateTensorShapesFromInputs();
626 CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
// Remaining per-layer shape-inference tests. NOTE(review): incomplete
// extract — TEST_CASE braces and most descriptor setups are missing from
// this view.
629 TEST_CASE(
"ReshapeTest")
635 CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor,
"reshape");
638 TEST_CASE(
"ResizeTest")
645 CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor,
"resize");
// Slice: output shape equals descriptor.m_Size.
// NOTE(review): the slice layer is named "mean" — apparent copy-paste;
// behavior is unaffected.
648 TEST_CASE(
"SliceTest")
651 descriptor.
m_Begin = { 1, 0, 1, 2 };
652 descriptor.
m_Size = { 2, 1, 2, 3 };
654 CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor,
"mean");
657 TEST_CASE(
"SpaceToBatchNdTest")
661 std::vector<unsigned int> blockShape {2, 2};
662 std::vector<std::pair<unsigned int, unsigned int>> padlist = {{0, 0}, {0, 0}};
668 CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor,
"spacetobatchnd");
671 TEST_CASE(
"SpaceToDepth")
678 CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor,
"spacetodepth");
// Splitter: one input split into two views of equal size.
681 TEST_CASE(
"SplitterTest")
685 descriptor.SetViewSize(0, 0, 1);
686 descriptor.SetViewSize(0, 1, 2);
687 descriptor.SetViewSize(0, 2, 2);
689 descriptor.SetViewSize(1, 0, 1);
690 descriptor.SetViewSize(1, 1, 2);
691 descriptor.SetViewSize(1, 2, 2);
693 CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor,
"splitter");
// Stack: two 3-D inputs stacked into one 4-D output.
696 TEST_CASE(
"StackTest")
704 CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor,
"stack");
707 TEST_CASE(
"StridedSliceTest")
711 descriptor.
m_Begin = {0, 0, 0, 0};
712 descriptor.
m_End = {3, 2, 3, 1};
715 CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor,
"stridedslice");
718 TEST_CASE(
"Switchtest")
720 CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }},
"switch");
// NOTE(review): "TransposeConvolution2dTest" exercises StridedSliceLayer
// with strided-slice descriptor fields — this looks like a copy-paste of
// StridedSliceTest rather than a TransposeConvolution2d test; confirm
// against the original file.
723 TEST_CASE(
"TransposeConvolution2dTest")
727 descriptor.
m_Begin = {0, 0, 0, 0};
728 descriptor.
m_End = {3, 2, 3, 1};
731 CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor,
"t");
// NOTE(review): the transpose layer is named "stridedslice" — apparent
// copy-paste; behavior is unaffected.
734 TEST_CASE(
"TransposeTest")
739 CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor,
"stridedslice");
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
float m_Min
Minimum value.
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_DilationY
Dilation along y axis.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
float m_NmsIouThreshold
Intersection over union threshold.
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
std::vector< unsigned int > m_BlockShape
Block shape values.
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Validate all output shapes.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Struct for the users to pass backend specific options.
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
SoftmaxDescriptor LogSoftmaxDescriptor
A LogSoftmaxDescriptor for the LogSoftmaxLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A MeanDescriptor for the MeanLayer.
Infer missing output shapes and validate all output shapes.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
int m_Axis
Axis to reduce across the input tensor.
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
float m_Max
Maximum value.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.