20 template<armnn::DataType DataType>
28 template<armnn::DataType DataType>
33 for (
unsigned int i=0; i < numInputs; i++)
38 for (
unsigned int o=0; o < numOutputs; o++)
47 template<
typename LayerType,
typename DescType =
typename LayerType::DescriptorType>
64 template<
typename LayerType>
81 struct DummyLayer<
armnn::BatchNormalizationLayer>
86 m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
88 m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
90 m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
92 m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
105 struct DummyLayer<
armnn::BatchToSpaceNdLayer>
121 struct DummyLayer<
armnn::ConstantLayer, void>
153 struct DummyLayer<
armnn::ConcatLayer>
170 struct DummyLayer<
armnn::MapLayer, void>
202 struct DummyLayer<
armnn::SplitterLayer>
219 struct DummyLayer<
armnn::UnmapLayer, void>
234 template <
typename ConvolutionLayerType>
235 struct DummyConvolutionLayer
237 DummyConvolutionLayer()
239 typename ConvolutionLayerType::DescriptorType desc;
243 m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
245 m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
249 ~DummyConvolutionLayer()
258 struct DummyLayer<
armnn::Convolution2dLayer>
259 :
public DummyConvolutionLayer<armnn::Convolution2dLayer>
264 struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
265 :
public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
270 struct DummyLayer<armnn::TransposeConvolution2dLayer>
271 :
public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
276 struct DummyLayer<armnn::DetectionPostProcessLayer>
281 m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
293 template <
typename LstmLayerType>
294 struct DummyLstmLayer
298 typename LstmLayerType::DescriptorType desc;
299 desc.m_CifgEnabled =
false;
302 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
304 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
306 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
308 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
310 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
312 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
314 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
316 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
318 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
321 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
323 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
325 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
338 struct DummyLayer<armnn::LstmLayer>
339 :
public DummyLstmLayer<armnn::LstmLayer>
343 template <
typename QLstmLayerType>
344 struct DummyQLstmLayer
348 typename QLstmLayerType::DescriptorType desc;
349 desc.m_CifgEnabled =
false;
350 desc.m_PeepholeEnabled =
true;
351 desc.m_ProjectionEnabled =
true;
352 desc.m_LayerNormEnabled =
true;
357 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
359 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
361 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
364 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
366 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
368 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
371 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
373 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
375 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
379 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
381 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
383 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
387 m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
389 m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>(
393 m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
395 m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
397 m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
401 m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
403 m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
405 m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
407 m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
420 struct DummyLayer<armnn::QuantizedLstmLayer, void>
426 m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
428 m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
430 m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
432 m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
435 m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
437 m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
439 m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
441 m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
444 m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
446 m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
448 m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
450 m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
463 struct DummyLayer<armnn::FullyConnectedLayer>
469 m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
482 template<armnn::LayerType>
485 #define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \ 486 template<armnn::DataType DataType> \ 487 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 489 using Type = armnn::name##Layer; \ 490 using Desc = descType; \ 491 using QueueDesc = armnn::name##QueueDescriptor; \ 492 constexpr static const char* NameStr = #name; \ 493 constexpr static const bool IsException = false; \ 495 static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \ 496 unsigned int nIn, unsigned int nOut) \ 499 armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \ 500 return factory->Create##name(desc, info); \ 504 #define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \ 505 template<armnn::DataType DataType> \ 506 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 508 using Type = armnn::name##Layer; \ 509 using Desc = descType; \ 510 using QueueDesc = armnn::name##QueueDescriptor; \ 511 using Workload = armnn::name##Workload; \ 512 constexpr static const char* NameStr = #name; \ 513 constexpr static const bool IsException = false; \ 515 static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \ 516 unsigned int nIn, unsigned int nOut) \ 518 IgnoreUnused(factory); \ 520 armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \ 521 return std::make_unique<armnn::name##Workload>(desc, info); \ 527 #define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void) 531 #define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor) 534 #define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \ 535 template<armnn::DataType DataType> \ 536 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 538 using Type = armnn::name##Layer; \ 539 using Desc = descType; \ 540 constexpr static const char* NameStr = #name; \ 541 constexpr static const bool IsException = true; \ 543 static 
std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \ 544 unsigned int nIn, unsigned int nOut) \ 546 IgnoreUnused(factory, nIn, nOut); \ 547 return std::unique_ptr<armnn::IWorkload>(); \ 551 #define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void) 552 #define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor) 555 template<armnn::LayerType Type, armnn::DataType DataType>
556 struct LayerTypePolicy;
694 template<armnn::LayerType Type>
701 template<armnn::LayerType Type>
708 unsigned int GetNumInputs<armnn::LayerType::Concat>(
const armnn::Layer& layer)
717 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
718 bool IsLayerSupportedTest(
FactoryType *factory, Tag<Type>)
720 using LayerPolicy = LayerTypePolicy<Type, DataType>;
721 using LayerType =
typename LayerPolicy::Type;
722 using LayerDesc =
typename LayerPolicy::Desc;
723 DummyLayer<LayerType, LayerDesc> layer;
725 if (LayerPolicy::IsException)
730 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
731 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
734 DummyLayer<armnn::ConstantLayer, void> previousLayer;
737 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
739 for (
unsigned int i = 0; i < numIn; i++)
741 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
743 previousLayerOutputSlot.
Connect(layerInputSlot);
746 for (
unsigned int i = 0; i < numOut; i++)
748 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
751 std::string layerName = LayerPolicy::NameStr;
752 std::string reasonIfUnsupported;
753 if (FactoryType::IsLayerSupported(*layer.m_Layer,
DataType, reasonIfUnsupported))
755 std::string errorMsg =
" layer expected support but found none.";
758 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() !=
nullptr;
759 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
768 catch(
const std::exception& e)
771 BOOST_TEST_ERROR(layerName <<
": " << errorMsg);
776 errorMsg =
"Unexpected error while testing support for ";
777 BOOST_TEST_ERROR(errorMsg << layerName);
783 std::string errorMsg =
"layer expected no support (giving reason: " + reasonIfUnsupported +
") but found some.";
786 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() ==
nullptr;
787 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
803 catch(
const std::exception& e)
806 BOOST_TEST_ERROR(layerName <<
": " << errorMsg);
811 errorMsg =
"Unexpected error while testing support for ";
812 BOOST_TEST_ERROR(errorMsg << layerName);
818 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
819 bool IsLayerSupportedTest(
FactoryType *factory, Tag<armnn::LayerType::Map>)
825 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
826 bool IsLayerSupportedTest(
FactoryType *factory, Tag<armnn::LayerType::Unmap>)
839 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
840 bool IsLayerSupportedTestsImpl(
FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
842 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
846 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
847 bool IsLayerSupportedTestsImpl(
FactoryType *factory, Tag<Type>)
849 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
852 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
853 (factory, Tag<NextType(Type)>());
857 template<
typename FactoryType, armnn::DataType DataType>
860 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
863 template<armnn::LayerType Type>
864 bool TestLayerTypeMatches()
866 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
867 using LayerType =
typename LayerPolicy::Type;
868 using LayerDesc =
typename LayerPolicy::Desc;
869 DummyLayer<LayerType, LayerDesc> layer;
871 std::stringstream ss;
872 ss << LayerPolicy::NameStr <<
" layer type mismatches expected layer type value.";
873 bool v = Type == layer.m_Layer->GetType();
874 BOOST_CHECK_MESSAGE(v, ss.str());
878 template<armnn::LayerType Type>
879 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
881 return TestLayerTypeMatches<Type>();
884 template<armnn::LayerType Type>
885 bool LayerTypeMatchesTestImpl(Tag<Type>)
887 return TestLayerTypeMatches<Type>() &&
888 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
891 template<
typename FactoryType,
typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
892 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
903 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
904 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
905 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
906 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
908 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
913 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
914 bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
932 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
935 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
937 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
938 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
940 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
945 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
946 bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
964 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
967 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
969 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
970 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
972 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
977 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
978 bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
981 static const std::vector<unsigned> axes = {1, 0};
992 input->GetOutputSlot(0).Connect(layer->
GetInputSlot(0));
993 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
997 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
1004 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
1005 bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
1008 static const std::vector<unsigned> axes = {};
1021 input->GetOutputSlot(0).Connect(layer->
GetInputSlot(0));
1022 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
1026 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
1031 template<
typename FactoryType, armnn::DataType OutputDataType>
1032 bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
1044 bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
A layer that the constant data can be bound to.
This layer represents a split operation.
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
This layer represents a batch normalization operation.
A ViewsDescriptor for the SplitterLayer.
void Slice(const TensorInfo &inputInfo, const SliceDescriptor &descriptor, const void *inputData, void *outputData, unsigned int dataTypeSize)
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
void Splitter(const SplitterQueueDescriptor &data, std::vector< ITensorHandle *> inputs, std::vector< ITensorHandle *> outputs)
void Stack(const StackQueueDescriptor &data, std::vector< std::unique_ptr< Decoder< float >>> &inputs, Encoder< float > &output, const TensorInfo &inputInfo, const TensorInfo &outputInfo)
void Fill(Encoder< float > &output, const TensorShape &desiredOutputShape, const float value)
Creates a tensor and fills it with a scalar value.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
void Reduce(const TensorInfo &inputInfo, const TensorInfo &outputInfo, Decoder< float > &input, Encoder< float > &output, const std::vector< uint32_t > axis, const ReduceOperation reduceOperation)
int Connect(InputSlot &destination)
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
void Transpose(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
void DepthToSpace(const TensorInfo &inputInfo, const DepthToSpaceDescriptor &descriptor, const void *inputData, void *outputData, unsigned int dataTypeSize)
This layer represents a detection postprocess operator.
void ArgMinMax(Decoder< float > &in, OUT *out, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, ArgMinMaxFunction function, int axis)
Copyright (c) 2021 ARM Limited and Contributors.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType)
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
void FakeQuantization(const float *inputData, float *outputData, uint32_t numElements, float min, float max)
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void FullyConnected(const TensorShape &rInputShape, Decoder< float > &rInputDecoder, const TensorShape &rOutputShape, Encoder< float > &rOutputEncoder, const TensorShape &rWeightsShape, Decoder< float > &rWeightDecoder, Decoder< float > &rBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
Performs a matrix multiplication and optionally adds a bias.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
This layer represents a memory copy operation.
This layer represents a memory copy operation.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
This layer represents a Logical Binary operation.
void DetectionPostProcess(const TensorInfo &boxEncodingsInfo, const TensorInfo &scoresInfo, const TensorInfo &anchorsInfo, const TensorInfo &detectionBoxesInfo, const TensorInfo &detectionClassesInfo, const TensorInfo &detectionScoresInfo, const TensorInfo &numDetectionsInfo, const DetectionPostProcessDescriptor &desc, Decoder< float > &boxEncodings, Decoder< float > &scores, Decoder< float > &anchors, float *detectionBoxes, float *detectionClasses, float *detectionScores, float *numDetections)
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a fully connected operation.
An LstmDescriptor for the LstmLayer.
This layer represents a QuantizedLstm operation.
An output connection slot for a layer.
void Gather(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo, Decoder< float > &params, const int32_t *indices, Encoder< float > &output, const int32_t axis)
An OriginsDescriptor for the ConcatLayer.
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name)
A FullyConnectedDescriptor for the FullyConnectedLayer.
void Debug(const TensorInfo &inputInfo, const T *inputData, LayerGuid guid, const std::string &layerName, unsigned int slotIndex)
This layer represents a merge operation.
float Activation(float in, ActivationFunction function, float a, float b)
This layer represents a BatchToSpaceNd operation.
void Pad(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const ITensorHandle *inputHandle, ITensorHandle *outputHandle, const PadQueueDescriptor &data)
A QLstmDescriptor for the QLstmLayer.
QuantizedType Quantize(float value, float scale, int32_t offset)
Quantize a floating point data type into an 8-bit data type.
std::vector< TensorInfo > m_OutputTensorInfos
void LogSoftmax(Decoder< float > &input, Encoder< float > &output, const TensorInfo &inputInfo, const LogSoftmaxDescriptor &descriptor)
void SpaceToBatchNd(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const SpaceToBatchNdDescriptor &params, Decoder< float > &inputData, Encoder< float > &outputData)
#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType)
#define DECLARE_LAYER_POLICY_1_PARAM(name)
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
This layer represents a QLstm operation.
void StridedSlice(const TensorInfo &inputInfo, const StridedSliceDescriptor &params, const void *inputData, void *outputData, unsigned int dataTypeSize)
const OutputHandler & GetOutputHandler(unsigned int i=0) const
ClWorkloadFactory FactoryType
A MeanDescriptor for the MeanLayer.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Contains information about inputs and outputs to a layer.
void SpaceToDepth(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const SpaceToDepthDescriptor &params, Decoder< float > &inputData, Encoder< float > &outputData)
#define DECLARE_LAYER_POLICY_2_PARAM(name)
This layer represents a mean operation.
void BatchToSpaceNd(const DataLayoutIndexed &dataLayout, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, const std::vector< unsigned int > &blockShape, const std::vector< std::pair< unsigned int, unsigned int >> &cropsData, Decoder< float > &inputDecoder, Encoder< float > &outputEncoder)
virtual int Connect(IInputSlot &destination)=0
void Pooling2d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling2dDescriptor &params)
Computes the Pooling2d operation.
void Softmax(Decoder< float > &in, Encoder< float > &out, const TensorInfo &inputTensorInfo, float beta, int axis)
Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo...
void Resize(Decoder< float > &in, const TensorInfo &inputInfo, Encoder< float > &out, const TensorInfo &outputInfo, DataLayoutIndexed dataLayout, armnn::ResizeMethod resizeMethod, bool alignCorners, bool halfPixelCenters)
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...