20 template<armnn::DataType DataType>
28 template<armnn::DataType DataType>
33 for (
unsigned int i=0; i < numInputs; i++)
38 for (
unsigned int o=0; o < numOutputs; o++)
47 template<
typename LayerType,
typename DescType =
typename LayerType::DescriptorType>
64 template<
typename LayerType>
81 struct DummyLayer<
armnn::BatchNormalizationLayer>
86 m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
88 m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
90 m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
92 m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
105 struct DummyLayer<
armnn::BatchToSpaceNdLayer>
121 struct DummyLayer<
armnn::ConstantLayer, void>
153 struct DummyLayer<
armnn::ConcatLayer>
170 struct DummyLayer<
armnn::MapLayer, void>
202 struct DummyLayer<
armnn::SplitterLayer>
219 struct DummyLayer<
armnn::UnmapLayer, void>
234 template <
typename ConvolutionLayerType>
235 struct DummyConvolutionLayer
237 DummyConvolutionLayer()
239 typename ConvolutionLayerType::DescriptorType desc;
243 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
245 m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
249 ~DummyConvolutionLayer()
258 struct DummyLayer<
armnn::Convolution2dLayer>
259 :
public DummyConvolutionLayer<armnn::Convolution2dLayer>
264 struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
265 :
public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
270 struct DummyLayer<armnn::TransposeConvolution2dLayer>
271 :
public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
276 struct DummyLayer<armnn::DetectionPostProcessLayer>
281 m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
293 template <
typename LstmLayerType>
294 struct DummyLstmLayer
298 typename LstmLayerType::DescriptorType desc;
299 desc.m_CifgEnabled =
false;
302 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
304 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
306 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
308 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
310 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
312 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
314 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
316 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
318 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
321 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
323 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
325 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
338 struct DummyLayer<armnn::LstmLayer>
339 :
public DummyLstmLayer<armnn::LstmLayer>
343 template <
typename QLstmLayerType>
344 struct DummyQLstmLayer
348 typename QLstmLayerType::DescriptorType desc;
349 desc.m_CifgEnabled =
false;
350 desc.m_PeepholeEnabled =
true;
351 desc.m_ProjectionEnabled =
true;
352 desc.m_LayerNormEnabled =
true;
357 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
359 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
361 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
364 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
366 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
368 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
371 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
373 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
375 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
379 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
381 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
383 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
387 m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
389 m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
393 m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
395 m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
397 m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
401 m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
403 m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
405 m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
407 m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
420 struct DummyLayer<armnn::QuantizedLstmLayer, void>
426 m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
428 m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
430 m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
432 m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
435 m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
437 m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
439 m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
441 m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
444 m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
446 m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
448 m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
450 m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
463 struct DummyLayer<armnn::FullyConnectedLayer>
469 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
482 template<armnn::LayerType>
485 #define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \ 486 template<armnn::DataType DataType> \ 487 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 489 using Type = armnn::name##Layer; \ 490 using Desc = descType; \ 491 using QueueDesc = armnn::name##QueueDescriptor; \ 492 constexpr static const char* NameStr = #name; \ 493 constexpr static const bool IsException = false; \ 495 static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \ 496 unsigned int nIn, unsigned int nOut) \ 499 armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \ 500 return factory->Create##name(desc, info); \ 504 #define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \ 505 template<armnn::DataType DataType> \ 506 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 508 using Type = armnn::name##Layer; \ 509 using Desc = descType; \ 510 using QueueDesc = armnn::name##QueueDescriptor; \ 511 using Workload = armnn::name##Workload; \ 512 constexpr static const char* NameStr = #name; \ 513 constexpr static const bool IsException = false; \ 515 static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \ 516 unsigned int nIn, unsigned int nOut) \ 518 IgnoreUnused(factory); \ 520 armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \ 521 return std::make_unique<armnn::name##Workload>(desc, info); \ 527 #define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void) 531 #define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor) 534 #define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \ 535 template<armnn::DataType DataType> \ 536 struct LayerTypePolicy<armnn::LayerType::name, DataType> \ 538 using Type = armnn::name##Layer; \ 539 using Desc = descType; \ 540 constexpr static const char* NameStr = #name; \ 541 constexpr static const bool IsException = true; \ 543 static 
std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \ 544 unsigned int nIn, unsigned int nOut) \ 546 IgnoreUnused(factory, nIn, nOut); \ 547 return std::unique_ptr<armnn::IWorkload>(); \ 551 #define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void) 552 #define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor) 555 template<armnn::LayerType Type, armnn::DataType DataType>
556 struct LayerTypePolicy;
690 template<armnn::LayerType Type>
697 template<armnn::LayerType Type>
704 unsigned int GetNumInputs<armnn::LayerType::Concat>(
const armnn::Layer& layer)
713 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
714 bool IsLayerSupportedTest(
FactoryType *factory, Tag<Type>)
716 using LayerPolicy = LayerTypePolicy<Type, DataType>;
717 using LayerType =
typename LayerPolicy::Type;
718 using LayerDesc =
typename LayerPolicy::Desc;
719 DummyLayer<LayerType, LayerDesc> layer;
721 if (LayerPolicy::IsException)
726 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
727 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
730 DummyLayer<armnn::ConstantLayer, void> previousLayer;
733 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
735 for (
unsigned int i = 0; i < numIn; i++)
737 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
739 previousLayerOutputSlot.
Connect(layerInputSlot);
742 for (
unsigned int i = 0; i < numOut; i++)
744 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
747 std::string layerName = LayerPolicy::NameStr;
748 std::string reasonIfUnsupported;
749 if (FactoryType::IsLayerSupported(*layer.m_Layer,
DataType, reasonIfUnsupported))
751 std::string errorMsg =
" layer expected support but found none.";
754 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() !=
nullptr;
755 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
764 catch(
const std::exception& e)
767 BOOST_TEST_ERROR(layerName <<
": " << errorMsg);
772 errorMsg =
"Unexpected error while testing support for ";
773 BOOST_TEST_ERROR(errorMsg << layerName);
779 std::string errorMsg =
"layer expected no support (giving reason: " + reasonIfUnsupported +
") but found some.";
782 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() ==
nullptr;
783 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
799 catch(
const std::exception& e)
802 BOOST_TEST_ERROR(layerName <<
": " << errorMsg);
807 errorMsg =
"Unexpected error while testing support for ";
808 BOOST_TEST_ERROR(errorMsg << layerName);
814 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
815 bool IsLayerSupportedTest(
FactoryType *factory, Tag<armnn::LayerType::Map>)
821 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
822 bool IsLayerSupportedTest(
FactoryType *factory, Tag<armnn::LayerType::Unmap>)
835 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
836 bool IsLayerSupportedTestsImpl(
FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
838 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
842 template<
typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
843 bool IsLayerSupportedTestsImpl(
FactoryType *factory, Tag<Type>)
845 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
848 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
849 (factory, Tag<NextType(Type)>());
853 template<
typename FactoryType, armnn::DataType DataType>
856 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
859 template<armnn::LayerType Type>
860 bool TestLayerTypeMatches()
862 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
863 using LayerType =
typename LayerPolicy::Type;
864 using LayerDesc =
typename LayerPolicy::Desc;
865 DummyLayer<LayerType, LayerDesc> layer;
867 std::stringstream ss;
868 ss << LayerPolicy::NameStr <<
" layer type mismatches expected layer type value.";
869 bool v = Type == layer.m_Layer->GetType();
870 BOOST_CHECK_MESSAGE(v, ss.str());
874 template<armnn::LayerType Type>
875 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
877 return TestLayerTypeMatches<Type>();
880 template<armnn::LayerType Type>
881 bool LayerTypeMatchesTestImpl(Tag<Type>)
883 return TestLayerTypeMatches<Type>() &&
884 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
887 template<
typename FactoryType,
typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
888 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
899 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
900 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
901 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
902 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
904 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
909 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
910 bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
928 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
931 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
933 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
934 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
936 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
941 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
942 bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
960 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
963 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
965 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
966 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
968 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
973 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
974 bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
977 static const std::vector<unsigned> axes = {1, 0};
988 input->GetOutputSlot(0).Connect(layer->
GetInputSlot(0));
989 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
993 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
1000 template<
typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
1001 bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
1004 static const std::vector<unsigned> axes = {};
1017 input->GetOutputSlot(0).Connect(layer->
GetInputSlot(0));
1018 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
1022 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
1027 template<
typename FactoryType, armnn::DataType OutputDataType>
1028 bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
1040 bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
A layer that the constant data can be bound to.
This layer represents a split operation.
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
This layer represents a batch normalization operation.
A ViewsDescriptor for the SplitterLayer.
void Slice(const TensorInfo &inputInfo, const SliceDescriptor &descriptor, const void *inputData, void *outputData, unsigned int dataTypeSize)
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
void Fill(Encoder< float > &output, const TensorShape &desiredOutputShape, const float value)
Creates a tensor and fills it with a scalar value.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
int Connect(InputSlot &destination)
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
void Transpose(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
void DepthToSpace(const TensorInfo &inputInfo, const DepthToSpaceDescriptor &descriptor, const void *inputData, void *outputData, unsigned int dataTypeSize)
This layer represents a detection postprocess operator.
void ArgMinMax(Decoder< float > &in, OUT *out, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, ArgMinMaxFunction function, int axis)
Copyright (c) 2020 ARM Limited.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType)
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
void FakeQuantization(const float *inputData, float *outputData, uint32_t numElements, float min, float max)
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void FullyConnected(const TensorShape &rInputShape, Decoder< float > &rInputDecoder, const TensorShape &rOutputShape, Encoder< float > &rOutputEncoder, const TensorShape &rWeightsShape, Decoder< float > &rWeightDecoder, Decoder< float > &rBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
Performs a matrix multiplication and optionally adds a bias.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
void Stack(const StackQueueDescriptor &data, std::vector< std::unique_ptr< Decoder< float >>> &inputs, Encoder< float > &output)
This layer represents a memory copy operation.
This layer represents a memory copy operation.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
This layer represents a Logical Binary operation.
void DetectionPostProcess(const TensorInfo &boxEncodingsInfo, const TensorInfo &scoresInfo, const TensorInfo &anchorsInfo, const TensorInfo &detectionBoxesInfo, const TensorInfo &detectionClassesInfo, const TensorInfo &detectionScoresInfo, const TensorInfo &numDetectionsInfo, const DetectionPostProcessDescriptor &desc, Decoder< float > &boxEncodings, Decoder< float > &scores, Decoder< float > &anchors, float *detectionBoxes, float *detectionClasses, float *detectionScores, float *numDetections)
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a fully connected operation.
An LstmDescriptor for the LstmLayer.
This layer represents a QuantizedLstm operation.
An output connection slot for a layer.
void Gather(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo, Decoder< float > &params, const int32_t *indices, Encoder< float > &output, const int32_t axis)
An OriginsDescriptor for the ConcatLayer.
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name)
A FullyConnectedDescriptor for the FullyConnectedLayer.
void Debug(const TensorInfo &inputInfo, const T *inputData, LayerGuid guid, const std::string &layerName, unsigned int slotIndex)
This layer represents a merge operation.
float Activation(float in, ActivationFunction function, float a, float b)
This layer represents a BatchToSpaceNd operation.
A QLstmDescriptor for the QLstmLayer.
QuantizedType Quantize(float value, float scale, int32_t offset)
Quantize a floating point data type into an 8-bit data type.
std::vector< TensorInfo > m_OutputTensorInfos
void LogSoftmax(Decoder< float > &input, Encoder< float > &output, const TensorInfo &inputInfo, const LogSoftmaxDescriptor &descriptor)
void SpaceToBatchNd(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const SpaceToBatchNdDescriptor &params, Decoder< float > &inputData, Encoder< float > &outputData)
#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType)
#define DECLARE_LAYER_POLICY_1_PARAM(name)
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
This layer represents a QLstm operation.
void StridedSlice(const TensorInfo &inputInfo, const StridedSliceDescriptor &params, const void *inputData, void *outputData, unsigned int dataTypeSize)
const OutputHandler & GetOutputHandler(unsigned int i=0) const
ClWorkloadFactory FactoryType
A MeanDescriptor for the MeanLayer.
void Mean(const armnn::TensorInfo &inputInfo, const armnn::TensorInfo &outputInfo, const std::vector< unsigned int > &axis, Decoder< float > &input, Encoder< float > &output)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Contains information about inputs and outputs to a layer.
void SpaceToDepth(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const SpaceToDepthDescriptor &params, Decoder< float > &inputData, Encoder< float > &outputData)
#define DECLARE_LAYER_POLICY_2_PARAM(name)
This layer represents a mean operation.
void BatchToSpaceNd(const DataLayoutIndexed &dataLayout, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, const std::vector< unsigned int > &blockShape, const std::vector< std::pair< unsigned int, unsigned int >> &cropsData, Decoder< float > &inputDecoder, Encoder< float > &outputEncoder)
void Pad(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const PadQueueDescriptor &data)
virtual int Connect(IInputSlot &destination)=0
void Pooling2d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling2dDescriptor &params)
Computes the Pooling2d operation.
void Splitter(const SplitterQueueDescriptor &data)
void Softmax(Decoder< float > &in, Encoder< float > &out, const TensorInfo &inputTensorInfo, float beta, int axis)
Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo...
void Resize(Decoder< float > &in, const TensorInfo &inputInfo, Encoder< float > &out, const TensorInfo &outputInfo, DataLayoutIndexed dataLayout, armnn::ResizeMethod resizeMethod, bool alignCorners, bool halfPixelCenters)
A BatchNormalizationDescriptor for the BatchNormalizationLayer.