From c9cc80455ff29fd2c8622c9487ec9c57ade6ea30 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Thu, 1 Nov 2018 16:15:57 +0000
Subject: IVGCVSW-1946: Remove armnn/src from the include paths

Change-Id: I663a0a0fccb43ee960ec070121a59df9db0bb04e
---
 src/backends/test/IsLayerSupportedTestImpl.hpp | 563 -------------------------
 1 file changed, 563 deletions(-)
 delete mode 100644 src/backends/test/IsLayerSupportedTestImpl.hpp

diff --git a/src/backends/test/IsLayerSupportedTestImpl.hpp b/src/backends/test/IsLayerSupportedTestImpl.hpp
deleted file mode 100644
index 722d82d8ab..0000000000
--- a/src/backends/test/IsLayerSupportedTestImpl.hpp
+++ /dev/null
@@ -1,563 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "Graph.hpp"
-
-#include <boost/core/ignore_unused.hpp>
-
-namespace
-{
-armnn::Graph dummyGraph;
-
-// Make a dummy TensorInfo object.
-template<armnn::DataType DataType>
-armnn::TensorInfo MakeDummyTensorInfo()
-{
-    return armnn::TensorInfo({2,2,2,2}, DataType);
-}
-
-
-// Make a dummy WorkloadInfo using a dummy TensorInfo.
-template<armnn::DataType DataType>
-armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
-{
-    armnn::WorkloadInfo info;
-    for (unsigned int i=0; i < numInputs; i++)
-    {
-        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
-    }
-    for (unsigned int o=0; o < numOutputs; o++)
-    {
-        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
-    }
-    return info;
-}
-
-// Template class to create a dummy layer (2 parameters).
-template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
-struct DummyLayer
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    LayerType* m_Layer;
-};
-
-// Template class to create a dummy layer (1 parameter).
-template<typename LayerType>
-struct DummyLayer<LayerType, void>
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<LayerType>("");
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    LayerType* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::BatchNormalizationLayer>
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
-        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::BatchNormalizationLayer* m_Layer;
-
-};
-
-template<>
-struct DummyLayer<armnn::ConstantLayer, void>
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::ConstantLayer* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
-
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::InputLayer* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::MergerLayer>
-{
-    DummyLayer()
-    {
-        armnn::OriginsDescriptor desc(2);
-        m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
-
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::MergerLayer* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
-{
-    DummyLayer()
-    {
-        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
-
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::OutputLayer* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::SplitterLayer>
-{
-    DummyLayer()
-    {
-        armnn::ViewsDescriptor desc(1);
-        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
-
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::SplitterLayer* m_Layer;
-};
-
-template <typename ConvolutionLayerType>
-struct DummyConvolutionLayer
-{
-    DummyConvolutionLayer()
-    {
-        typename ConvolutionLayerType::DescriptorType desc;
-        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
-        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-    }
-    ~DummyConvolutionLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    ConvolutionLayerType* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::Convolution2dLayer>
-    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
-{
-};
-
-template<>
-struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
-    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
-{
-};
-
-template <typename LstmLayerType>
-struct DummyLstmLayer
-{
-    DummyLstmLayer()
-    {
-        typename LstmLayerType::DescriptorType desc;
-        desc.m_CifgEnabled = false;
-
-        m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
-        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-
-        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-    }
-    ~DummyLstmLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::LstmLayer* m_Layer;
-};
-
-template<>
-struct DummyLayer<armnn::LstmLayer>
-        : public DummyLstmLayer<armnn::LstmLayer>
-{
-};
-
-template<>
-struct DummyLayer<armnn::FullyConnectedLayer>
-{
-    DummyLayer()
-    {
-        armnn::FullyConnectedLayer::DescriptorType desc;
-        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
-        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
-            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
-    }
-    ~DummyLayer()
-    {
-        dummyGraph.EraseLayer(m_Layer);
-    }
-    armnn::FullyConnectedLayer* m_Layer;
-};
-
-// Tag for giving LayerType entries a unique strong type each.
-template<armnn::LayerType Type>
-struct Tag{};
-
-#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
-template<armnn::DataType DataType> \
-struct LayerTypePolicy<armnn::LayerType::name, DataType> \
-{ \
-    using Type = armnn::name##Layer; \
-    using Desc = descType; \
-    using QueueDesc = armnn::name##QueueDescriptor; \
-    constexpr static const char* NameStr = #name; \
-    \
-    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
-        unsigned int nIn, unsigned int nOut) \
-    { \
-        QueueDesc desc; \
-        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
-        return factory->Create##name(desc, info); \
-    } \
-};
-
-// Define a layer policy specialization for use with the IsLayerSupported tests.
-// Use this version for layers whose constructor takes 1 parameter(name).
-#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)
-
-// Define a layer policy specialization for use with the IsLayerSupported tests.
-// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
-#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
-
-// Layer policy template.
-template<armnn::LayerType Type, armnn::DataType DataType>
-struct LayerTypePolicy;
-
-// Every entry in the armnn::LayerType enum must be accounted for below.
-DECLARE_LAYER_POLICY_2_PARAM(Activation)
-
-DECLARE_LAYER_POLICY_1_PARAM(Addition)
-
-DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
-
-DECLARE_LAYER_POLICY_1_PARAM(Constant)
-
-DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
-
-DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
-
-DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
-
-DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
-
-DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
-
-DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
-
-DECLARE_LAYER_POLICY_1_PARAM(Floor)
-
-DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
-
-DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
-
-DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
-
-DECLARE_LAYER_POLICY_2_PARAM(Lstm)
-
-DECLARE_LAYER_POLICY_2_PARAM(Mean)
-
-DECLARE_LAYER_POLICY_2_PARAM(Merger)
-
-DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
-
-DECLARE_LAYER_POLICY_2_PARAM(Normalization)
-
-DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)
-
-DECLARE_LAYER_POLICY_2_PARAM(Pad)
-
-DECLARE_LAYER_POLICY_2_PARAM(Permute)
-
-DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
-
-DECLARE_LAYER_POLICY_1_PARAM(Division)
-
-DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
-
-DECLARE_LAYER_POLICY_2_PARAM(Reshape)
-
-DECLARE_LAYER_POLICY_2_PARAM(Softmax)
-
-DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
-
-DECLARE_LAYER_POLICY_2_PARAM(Splitter)
-
-DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
-
-
-// Generic implementation to get the number of input slots for a given layer type;
-template<armnn::LayerType Type>
-unsigned int GetNumInputs(const armnn::Layer& layer)
-{
-    return layer.GetNumInputSlots();
-}
-
-// Generic implementation to get the number of output slots for a given layer type;
-template<armnn::LayerType Type>
-unsigned int GetNumOutputs(const armnn::Layer& layer)
-{
-    return layer.GetNumOutputSlots();
-}
-
-template<>
-unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
-{
-    boost::ignore_unused(layer);
-    return 2;
-}
-
-// Tests that the IsLayerSupported() function returns the correct value.
-// We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
-// Returns true if expectations are met, otherwise returns false.
-template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
-bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
-{
-    using LayerPolicy = LayerTypePolicy<Type, DataType>;
-    using LayerType = typename LayerPolicy::Type;
-    using LayerDesc = typename LayerPolicy::Desc;
-    DummyLayer<LayerType, LayerDesc> layer;
-
-    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
-    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
-
-    // Make another dummy layer just to make IsLayerSupported have valid inputs.
-    DummyLayer<armnn::ConstantLayer, void> previousLayer;
-    // Set output of the previous layer to a dummy tensor.
-    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
-    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
-    // Connect all outputs of the previous layer to inputs of tested layer.
-    for (unsigned int i = 0; i < numIn; i++)
-    {
-        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
-        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
-        previousLayerOutputSlot.Connect(layerInputSlot);
-    }
-    // Set outputs of tested layer to a dummy tensor.
-    for (unsigned int i = 0; i < numOut; i++)
-    {
-        layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
-    }
-
-    std::string layerName = LayerPolicy::NameStr;
-    std::string reasonIfUnsupported;
-    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
-    {
-        std::string errorMsg = " layer expected support but found none.";
-        try
-        {
-            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
-            return retVal;
-        }
-        catch(const armnn::InvalidArgumentException& e)
-        {
-            boost::ignore_unused(e);
-            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
-            return true;
-        }
-        catch(const std::exception& e)
-        {
-            errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
-            return false;
-        }
-        catch(...)
-        {
-            errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
-            return false;
-        }
-    }
-    else
-    {
-        std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
-        try
-        {
-            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
-            return retVal;
-        }
-        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
-        // using parameters that make IsLayerSupported() return false should throw an
-        // InvalidArgumentException or UnimplementedException.
-        catch(const armnn::InvalidArgumentException& e)
-        {
-            boost::ignore_unused(e);
-            return true;
-        }
-        catch(const armnn::UnimplementedException& e)
-        {
-            boost::ignore_unused(e);
-            return true;
-        }
-        catch(const std::exception& e)
-        {
-            errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
-            return false;
-        }
-        catch(...)
-        {
-            errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
-            return false;
-        }
-    }
-}
-
-// Helper function to compute the next type in the LayerType enum.
-constexpr armnn::LayerType NextType(armnn::LayerType type)
-{
-    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
-}
-
-// Termination function for determining the end of the LayerType enumeration.
-template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
-bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
-{
-    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
-};
-
-// Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
-template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
-bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
-{
-    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
-
-    return v &&
-    IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
-        (factory, Tag<NextType(Type)>());
-};
-
-// Helper function to pass through to the test framework.
-template<typename FactoryType, armnn::DataType DataType>
-bool IsLayerSupportedTests(FactoryType *factory)
-{
-    return IsLayerSupportedTestsImpl<FactoryType, DataType, armnn::LayerType::FirstLayer>(
-        factory, Tag<armnn::LayerType::FirstLayer>());
-};
-
-template<armnn::LayerType Type>
-bool TestLayerTypeMatches()
-{
-    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
-    using LayerType = typename LayerPolicy::Type;
-    using LayerDesc = typename LayerPolicy::Desc;
-    DummyLayer<LayerType, LayerDesc> layer;
-
-    std::stringstream ss;
-    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
-    bool v = Type == layer.m_Layer->GetType();
-    BOOST_CHECK_MESSAGE(v, ss.str());
-    return v;
-};
-
-template<armnn::LayerType Type>
-bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
-{
-    return TestLayerTypeMatches<Type>();
-};
-
-template<armnn::LayerType Type>
-bool LayerTypeMatchesTestImpl(Tag<Type>)
-{
-    return TestLayerTypeMatches<Type>() &&
-        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
-};
-
-template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
-bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
-{
-    armnn::Graph graph;
-    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
-
-    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
-
-    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
-    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
-
-    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
-    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
-    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
-
-    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
-
-    return result;
-};
-
-} //namespace
--
cgit v1.2.1
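
Three techniques carry the weight in the deleted header. First, DummyLayer is an RAII guard: its constructor registers a layer in the shared dummyGraph and its destructor erases it, so every test case gets a fresh layer that is cleaned up even when a Boost assertion throws. A minimal standalone sketch of the same pattern, using a hypothetical Graph/Layer pair rather than the ArmNN classes:

    #include <memory>
    #include <vector>

    // Hypothetical stand-ins for armnn::Layer and armnn::Graph.
    struct Layer {};
    struct Graph
    {
        std::vector<std::unique_ptr<Layer>> layers;
        Layer* AddLayer()
        {
            layers.push_back(std::make_unique<Layer>());
            return layers.back().get();
        }
        void EraseLayer(Layer* layer)
        {
            for (auto it = layers.begin(); it != layers.end(); ++it)
            {
                if (it->get() == layer) { layers.erase(it); return; }
            }
        }
    };

    Graph dummyGraph;

    // RAII guard: the layer lives exactly as long as the wrapper object.
    struct DummyLayer
    {
        DummyLayer()  { m_Layer = dummyGraph.AddLayer(); }
        ~DummyLayer() { dummyGraph.EraseLayer(m_Layer); }
        Layer* m_Layer;
    };

    int main()
    {
        DummyLayer layer; // added on construction, erased on scope exit
    }

The guard matters here because all the dummy layers share one global graph; without the destructor, layers would leak from one test case into the next.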
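Second, the DECLARE_LAYER_POLICY_* macros stamp out one LayerTypePolicy specialization per enum entry, deriving the layer class (name##Layer), the queue descriptor (name##QueueDescriptor), the display string (#name), and the factory call (Create##name) from a single identifier. A reduced sketch of the same token-pasting trick, with made-up stand-ins (Kind, WidgetLayer, Factory::CreateWidget) for the ArmNN types:

    #include <cstdio>

    // Stand-ins for the real framework types.
    struct WidgetLayer {};
    struct WidgetQueueDescriptor {};
    struct Factory
    {
        void* CreateWidget(const WidgetQueueDescriptor&) { return nullptr; }
    };

    enum class Kind { Widget };

    // Primary template; one specialization is generated per layer kind.
    template<Kind K>
    struct LayerTypePolicy;

    // The macro pastes `name` into type names, a string literal, and the
    // factory method, so one identifier drives the whole specialization.
    #define DECLARE_LAYER_POLICY(name)                          \
    template<>                                                  \
    struct LayerTypePolicy<Kind::name>                          \
    {                                                           \
        using Type = name##Layer;                               \
        using QueueDesc = name##QueueDescriptor;                \
        constexpr static const char* NameStr = #name;           \
                                                                \
        static void* MakeDummyWorkload(Factory* factory)        \
        {                                                       \
            QueueDesc desc;                                     \
            return factory->Create##name(desc);                 \
        }                                                       \
    };

    DECLARE_LAYER_POLICY(Widget)

    int main()
    {
        Factory factory;
        std::printf("%s\n", LayerTypePolicy<Kind::Widget>::NameStr); // prints "Widget"
        LayerTypePolicy<Kind::Widget>::MakeDummyWorkload(&factory);
    }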
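Third, IsLayerSupportedTests() visits every armnn::LayerType at compile time: Tag<Type> gives each enum entry a distinct strong type, the constexpr NextType() steps to the next entry, and an overload taking Tag<armnn::LayerType::LastLayer> wins overload resolution at the final entry and stops the recursion. The same machinery on a toy enum; Fruit, Handle, VisitAll, and Next are illustrative names, not ArmNN APIs:

    #include <iostream>

    enum class Fruit { Apple, Banana, Cherry, First = Apple, Last = Cherry };

    // A distinct (strong) type per enum entry, so each entry can select a
    // different overload or template instantiation.
    template<Fruit F>
    struct Tag {};

    // Computes the next enum entry at compile time.
    constexpr Fruit Next(Fruit f)
    {
        return static_cast<Fruit>(static_cast<int>(f) + 1);
    }

    // Per-entry work; stands in for IsLayerSupportedTest.
    template<Fruit F>
    void Handle(Tag<F>)
    {
        std::cout << "entry " << static_cast<int>(F) << '\n';
    }

    // Termination overload: more specialized for Tag<Fruit::Last>, so it is
    // chosen at the final entry and does not recurse.
    template<Fruit F>
    void VisitAll(Tag<Fruit::Last>)
    {
        Handle(Tag<Fruit::Last>());
    }

    // Recursive overload: handles entry F, then steps to the next entry.
    template<Fruit F>
    void VisitAll(Tag<F>)
    {
        Handle(Tag<F>());
        VisitAll<Next(F)>(Tag<Next(F)>());
    }

    int main()
    {
        VisitAll<Fruit::First>(Tag<Fruit::First>()); // prints entries 0, 1, 2
    }

Because the recursion is resolved during template instantiation, an enum entry without a matching policy fails the build rather than the test run, which is how the deleted header enforced its "every entry in the armnn::LayerType enum must be accounted for" rule.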