Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp            |  53
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.hpp            |  12
-rw-r--r--  src/armnn/test/GraphTests.cpp                         |   4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                     |  19
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                |  17
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                  |   9
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp         |  19
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp  |  37
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp   | 124
9 files changed, 198 insertions(+), 96 deletions(-)
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index cbc97b3c0e..af0581ce4c 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -230,11 +230,16 @@ TEST_CASE("CheckDepthwiseConvolution2dLayer")
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
-
NetworkImpl net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor);
+
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
@@ -254,14 +259,16 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
-
NetworkImpl net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
- weights,
- EmptyOptional(),
- layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, layerName);
+
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, layerName);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
layer->ExecuteStrategy(visitor);
}
@@ -284,13 +291,21 @@ TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasesVisitor(biases);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
layer->ExecuteStrategy(visitor);
}
@@ -314,13 +329,21 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasesVisitor(biases);
+ TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, layerName);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, layerName);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
layer->ExecuteStrategy(visitor);
}
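
All four test cases above converge on the same shape. Condensed from the hunks, the new ConstTensorsAsInput wiring is (NetworkImpl and the visitor types come from the surrounding test code):

    // Constants move out of the layer and into ConstantLayers feeding the
    // depthwise convolution's input slots (0 = data, 1 = weights,
    // 2 = optional bias); each constant is verified by its own visitor.
    NetworkImpl net;
    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasesVisitor(biases);
    TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, layerName);

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasesLayer  = net.AddConstantLayer(biases);
    IConnectableLayer* const layer        = net.AddDepthwiseConvolution2dLayer(descriptor, layerName);

    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    biasesLayer->ExecuteStrategy(biasesVisitor);
    layer->ExecuteStrategy(visitor);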
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 4d887c8e37..00d17b4ae8 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -74,13 +74,9 @@ class TestDepthwiseConvolution2dLayerVisitor : public TestLayerVisitor
{
public:
explicit TestDepthwiseConvolution2dLayerVisitor(const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name = nullptr)
: TestLayerVisitor(name)
, m_Descriptor(descriptor)
- , m_Weights(weights)
- , m_Biases(biases)
{}
virtual ~TestDepthwiseConvolution2dLayerVisitor() {}
@@ -99,12 +95,6 @@ public:
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
- CheckConstTensors(m_Weights, constants[0]);
- if (m_Biases.has_value())
- {
- CHECK(constants.size() == 2);
- CheckConstTensors(m_Biases.value(), constants[1]);
- }
break;
}
default:
@@ -119,8 +109,6 @@ protected:
private:
DepthwiseConvolution2dDescriptor m_Descriptor;
- ConstTensor m_Weights;
- Optional<ConstTensor> m_Biases;
};
class TestFullyConnectedLayerVistor : public TestLayerVisitor
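
With m_Weights and m_Biases removed, the depthwise visitor now only checks the layer pointer, name, and descriptor; the constants themselves are checked by the separate TestConstantLayerVisitor executed on each ConstantLayer. A hedged sketch of the shape of that companion visitor (the real one lives in this header; the body below is assumed, not copied from the patch):

    class TestConstantLayerVisitor : public TestLayerVisitor
    {
    public:
        explicit TestConstantLayerVisitor(const armnn::ConstTensor& input,
                                          const char* name = nullptr)
            : TestLayerVisitor(name)
            , m_Input(input)
        {}

        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& /*descriptor*/,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId /*id*/ = 0) override
        {
            CheckLayerPointer(layer);
            CheckLayerName(name);
            CheckConstTensors(m_Input, constants[0]); // a ConstantLayer carries exactly one constant
        }

    private:
        armnn::ConstTensor m_Input;
    };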
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index d3dd499850..95421c5683 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -632,8 +632,10 @@ TEST_CASE("IConnectableLayerConstantTensorsByRef")
TensorInfo weightsInfo = constInfo;
ConstTensor weights(weightsInfo, weightData);
DepthwiseConvolution2dDescriptor desc;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ // GetConstantTensorsByRef() returns {m_Weights, m_Bias}, so we need to use the old AddDepthwiseConvolution2dLayer()
const auto depthwiseLayer = net->AddDepthwiseConvolution2dLayer(desc, weights, EmptyOptional(), "Depthwise");
-
+ ARMNN_NO_DEPRECATE_WARN_END
const void* resultData = depthwiseLayer->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
auto resultValue = reinterpret_cast<const uint8_t*>(resultData);
CHECK(resultValue[0] == 3);
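
The deprecation guard is needed because GetConstantTensorsByRef() still surfaces the legacy m_Weights/m_Bias members, and only the deprecated overload populates them. A hedged sketch of the by-reference behaviour this test asserts (buffer values illustrative):

    // The layer stores a reference to the caller's buffer, not a copy, so a
    // write made after layer creation is visible through
    // GetConstantTensorsByRef().
    std::vector<uint8_t> weightData(weightsInfo.GetNumBytes(), 0);
    ConstTensor weights(weightsInfo, weightData);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    IConnectableLayer* dw =
        net->AddDepthwiseConvolution2dLayer(desc, weights, EmptyOptional(), "Depthwise");
    ARMNN_NO_DEPRECATE_WARN_END

    weightData[0] = 3; // mutate after the layer exists
    const void* raw = dw->GetConstantTensorsByRef()[0].get()->GetConstTensor<void>();
    CHECK(reinterpret_cast<const uint8_t*>(raw)[0] == 3);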
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index a7277b78b5..6a13dc6456 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -478,11 +478,10 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
{
armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
+ armnn::TensorInfo weightsInfo(TensorShape(4, weightsShape), armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsVector(18);
- armnn::ConstTensor weights(
- armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
- weightsVector);
+ armnn::ConstTensor weights(weightsInfo, weightsVector);
DepthwiseConvolution2dDescriptor desc;
desc.m_BiasEnabled = false;
@@ -490,15 +489,19 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
desc.m_StrideY = 1;
desc.m_DataLayout = dataLayout;
- Layer* input = graph.AddLayer<InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(inputInfo);
-
+ InputLayer* input = graph.AddLayer<InputLayer>(0, "input");
DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
+
+ input->GetOutputSlot().SetTensorInfo(inputInfo);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
+ weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
- Layer* output = graph.AddLayer<OutputLayer>(0, "output");
input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
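
For reference, the post-patch helper condenses to the following Graph-level wiring, where the constant's payload lives in the ConstantLayer's m_LayerOutput handle rather than in the convolution's m_Weight:

    Graph graph;
    InputLayer* input = graph.AddLayer<InputLayer>(0, "input");
    DepthwiseConvolution2dLayer* layer =
        graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
    ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Tensor infos are set on every output slot before any connections are used.
    input->GetOutputSlot().SetTensorInfo(inputInfo);
    layer->GetOutputSlot().SetTensorInfo(outputInfo);
    weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);

    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);

    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
    weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot().Connect(output->GetInputSlot(0));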
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 687462dfb5..d45c9900c0 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -331,18 +331,11 @@ TEST_CASE("DepthwiseConvolutionTest")
descriptor.m_DataLayout = DataLayout::NHWC;
descriptor.m_BiasEnabled = false;
- Graph graph;
-
- auto layer = BuildGraph<DepthwiseConvolution2dLayer>(&graph,
- {{ 8, 16, 2, 1 }},
- descriptor,
- "depthwiseconv2d");
-
- const float Datum = 0.0f;
- ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
-
- RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
+ CreateGraphAndRunTest<DepthwiseConvolution2dLayer>({{ 8, 16, 2, 1 }, // input
+ { 2, 5, 3, 2 }}, // weights
+ {{ 8, 18, 1, 2 }}, // output
+ descriptor,
+ "conv2d");
}
TEST_CASE("DequantizeTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 212ae0ee01..048c4f51fd 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1928,6 +1928,7 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
if (layer->GetType() == LayerType::Multiplication)
{
IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
+ IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
const IConnectableLayer* inputLayer = &patternSubgraphInput->GetConnection()->GetOwningIConnectableLayer();
const IConnectableLayer* constantLayer = &layer->GetInputSlot(1).GetConnection()->GetOwningIConnectableLayer();
@@ -1935,7 +1936,7 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
// Figure out which of the two inputs is the constant
if (constantLayer->GetType() != LayerType::Constant)
{
- patternSubgraphInput = &layer->GetInputSlot(1);
+ std::swap(patternSubgraphInput, patternSubgraphConstant);
std::swap(inputLayer, constantLayer);
}
@@ -1965,7 +1966,7 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
ConstTensor weights(weightsInfo, weightData);
const auto depthwiseLayer = replacementGraph->AddDepthwiseConvolution2dLayer(
- desc, weights, armnn::EmptyOptional(), "Replacement for Constant-Multiplication");
+ desc, "Replacement for Constant-Multiplication");
auto& outslot = layer->GetOutputSlot(0);
SubgraphView::IOutputSlots outputs{ &outslot };
@@ -1973,7 +1974,9 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
layers.push_back(layer);
layers.push_back(const_cast<IConnectableLayer*>(constantLayer));
- SubgraphView patternSubgraph(std::move(layers), {patternSubgraphInput}, {&layer->GetOutputSlot(0)});
+ SubgraphView patternSubgraph(std::move(layers),
+ {patternSubgraphInput, patternSubgraphConstant},
+ {&layer->GetOutputSlot(0)});
subgraph.SubstituteSubgraph(patternSubgraph, depthwiseLayer );
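
The constant layer is part of the pattern being replaced, but its value now reaches the replacement through an input slot instead of a member. Declaring both slots as pattern inputs lets SubstituteSubgraph re-point them onto the replacement depthwise layer, whose slot 0 takes the data and slot 1 the weights; with only {patternSubgraphInput}, the constant's connection would have no slot to map to. Condensed:

    SubgraphView patternSubgraph(std::move(layers),
                                 {patternSubgraphInput, patternSubgraphConstant},
                                 {&layer->GetOutputSlot(0)});
    subgraph.SubstituteSubgraph(patternSubgraph, depthwiseLayer);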
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 2f9e1c6d31..9919c6d0e6 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -126,14 +126,18 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
"depthwiseConv2d");
- depthwiseConv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
+ auto* weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+
+ weightsLayer->GetOutputSlot().SetTensorInfo(weights.GetInfo());
depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+ depthwiseConv2dLayer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
// Connect up layers - input -> pad -> depthwiseConv2d -> output
input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
padLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(1));
depthwiseConv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
auto checkSimpleDepthwiseConv2d = [](const Layer* const layer)->bool {
@@ -151,6 +155,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimpleDepthwiseConv2d,
+ &IsLayerOfType<ConstantLayer>,
&IsLayerOfType<OutputLayer>));
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoDepthwiseConvolution2d()));
@@ -170,6 +175,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoDepthwiseConv2d,
+ &IsLayerOfType<ConstantLayer>,
&IsLayerOfType<OutputLayer>));
}
@@ -741,11 +747,8 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8};
TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
- Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
IConnectableLayer* conv2dLayer = network->AddDepthwiseConvolution2dLayer(convDescriptor,
- weights,
- optionalBias,
"DepthwiseConv2D");
TensorInfo outputInfo(4, outputShape, DataType::Float32);
@@ -758,6 +761,14 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));
+
+ auto biasLayer = network->AddConstantLayer(bias, "Bias");
+ biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());
+ biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2));
+
// Create ArmNN runtime
IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
// Optimise the network
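
For context, the pass under test removes the Pad layer by absorbing its spatial padding into the convolution descriptor, which is why the folded graph above drops the PadLayer while keeping the new ConstantLayer. A hedged sketch of that arithmetic (padTop/padBottom/padLeft/padRight are hypothetical names for the Pad layer's H and W pad-list entries; the pass implementation is outside this diff):

    DepthwiseConvolution2dDescriptor folded = depthwiseConvolution2dDescriptor;
    folded.m_PadTop    += padTop;     // H entries of the pad list (NHWC)
    folded.m_PadBottom += padBottom;
    folded.m_PadLeft   += padLeft;    // W entries
    folded.m_PadRight  += padRight;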
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index ac327bb609..e5f54208f0 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -90,7 +90,7 @@ struct DWConvolution2dTest
public:
using LayerType = DepthwiseConvolution2dLayer;
static const bool isElementWise = false;
- static const bool isConstTensorAsInputSupported = false;
+ static const bool isConstTensorAsInputSupported = true;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // [N,H,W,Cin]
static TensorShape GetOutputShape() { return TensorShape( {1, 3, 3, 12}); } // [N,H,W,Cout]
@@ -104,32 +104,35 @@ public:
float scale = 1.f,
int32_t offset = 0)
{
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+
DepthwiseConvolution2dDescriptor descriptor;
descriptor.m_BiasEnabled = false;
descriptor.m_DataLayout = DataLayout::NHWC;
descriptor.m_StrideX = 1;
descriptor.m_StrideY = 1;
- std::vector<float> weightsData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
- 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
- ConstTensor weights(weightsInfo, weightsVector);
- Optional<ConstTensor> optionalBias;
-
- return network->AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBias, name);
+ return network->AddDepthwiseConvolution2dLayer(descriptor, name);
}
static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
float scale = 1.f,
int32_t offset = 0)
{
- IgnoreUnused(network);
- IgnoreUnused(scale);
- IgnoreUnused(offset);
- return {};
+ std::vector<float> weightsData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
+ std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
+ ConstTensor weights(weightsInfo, weightsVector);
+
+ IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
+ std::vector<IConnectableLayer*> layers = { weightsLayer };
+ return layers;
}
};
@@ -390,10 +393,10 @@ INetworkPtr CreateNetwork(ActivationDescriptor activationDescriptor, bool preven
"activation");
IConnectableLayer* outputLayer = network->AddOutputLayer(0);
- IConnectableLayer* output2Layer = preventFusing?network->AddOutputLayer(1):nullptr;
+ IConnectableLayer* output2Layer = preventFusing ? network->AddOutputLayer(1) : nullptr;
// If ConstTensorAsInputs is supported, weights and bias are stored as constant layers.
- if(LayerTest::isConstTensorAsInputSupported)
+ if (LayerTest::isConstTensorAsInputSupported)
{
std::vector<IConnectableLayer*> constantLayers = LayerTest::AddConstantLayers(network.get(),
scale,
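
With isConstTensorAsInputSupported now true for DWConvolution2dTest, the shared CreateNetwork harness asks the fixture for its constant layers and hooks them up generically, weights first. A sketch of that hookup, mirroring the FuseBatchNormTests hunk below (the receiving layer's variable name is assumed):

    if (LayerTest::isConstTensorAsInputSupported)
    {
        std::vector<IConnectableLayer*> constantLayers =
            LayerTest::AddConstantLayers(network.get(), scale, offset);

        // Slot 0 carries the data; constants occupy slots 1..n in order
        // (weights, then bias when present).
        for (unsigned int i = 0; i < constantLayers.size(); ++i)
        {
            constantLayers[i]->GetOutputSlot(0).Connect(layer->GetInputSlot(i + 1));
        }
    }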
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 70cffea2b2..b28bb17773 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -24,6 +24,7 @@ class Conv2dTest
public:
using ConvDescriptorType = armnn::Convolution2dDescriptor;
using ConvLayerType = armnn::Convolution2dLayer;
+ static const bool isConstTensorAsInputSupported = false;
static IConnectableLayer *AddConvolution(INetwork *network,
const Convolution2dDescriptor &descriptor,
@@ -33,6 +34,19 @@ public:
{
return network->AddConvolution2dLayer(descriptor, weights, biases, name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network,
+ const Convolution2dDescriptor &descriptor,
+ const ConstTensor &weights,
+ const Optional<ConstTensor> &biases)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+
+ return {};
+ }
};
class DepthwiseConv2dTest
@@ -40,6 +54,7 @@ class DepthwiseConv2dTest
public:
using ConvDescriptorType = armnn::DepthwiseConvolution2dDescriptor;
using ConvLayerType = armnn::DepthwiseConvolution2dLayer;
+ static const bool isConstTensorAsInputSupported = true;
static IConnectableLayer *AddConvolution(INetwork *network,
const DepthwiseConvolution2dDescriptor &descriptor,
@@ -47,7 +62,29 @@ public:
const Optional<ConstTensor> &biases,
const char *name)
{
- return network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases, name);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+
+ return network->AddDepthwiseConvolution2dLayer(descriptor, name);
+ }
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network,
+ const DepthwiseConvolution2dDescriptor &descriptor,
+ const ConstTensor &weights,
+ const Optional<ConstTensor> &biases)
+ {
+ auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+ std::vector<IConnectableLayer*> layers = {weightsLayer};
+
+ if (descriptor.m_BiasEnabled)
+ {
+ auto biasLayer = network->AddConstantLayer(biases.value(), "Bias");
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo());
+ layers.emplace_back(biasLayer);
+ }
+
+ return layers;
}
};
@@ -73,7 +110,7 @@ template <typename Conv2dTest,
armnn::DataType ArmnnType,
typename ConvDescriptorType = typename Conv2dTest::ConvDescriptorType,
typename T = armnn::ResolveType<ArmnnType>>
-INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
+INetworkPtr CreateNetwork(bool depthwise, bool preventFusing)
{
// Define layers information
ConvDescriptorType convolution2dDescriptor;
@@ -110,11 +147,6 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsVector);
- std::vector<T> biasVector = GetVector<T>(outputDimensionSizes[3], 3.3f, 0.1f);
- TensorInfo biasInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true);
- ConstTensor bias(biasInfo, biasVector);
- Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
-
std::vector<T> betaVector = GetVector<T>(outputDimensionSizes[3], 0.0f, 0.2f);
std::vector<T> gammaVector = GetVector<T>(outputDimensionSizes[3], 0.5f, 0.1f);
std::vector<T> meanVector = GetVector<T>(outputDimensionSizes[3], 0.1f, 0.1f);
@@ -133,7 +165,7 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
IConnectableLayer* convLayer = Conv2dTest::AddConvolution(network.get(),
convolution2dDescriptor,
weights,
- optionalBias,
+ Optional<ConstTensor>(),
"convolution");
IConnectableLayer* batchNormLayer = network->AddBatchNormalizationLayer(batchNormDescriptor,
@@ -151,6 +183,21 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
output2Layer = network->AddOutputLayer(1);
}
+ // If ConstTensorAsInputs is supported, weights and bias are stored as constant layers.
+ if (Conv2dTest::isConstTensorAsInputSupported)
+ {
+ std::vector<IConnectableLayer*> constantLayers = Conv2dTest::AddConstantLayers(network.get(),
+ convolution2dDescriptor,
+ weights,
+ Optional<ConstTensor>());
+
+ // Connect constant layers to convLayer.
+ for (unsigned int i = 0; i < constantLayers.size(); ++i)
+ {
+ constantLayers[i]->GetOutputSlot(0).Connect(convLayer->GetInputSlot(i + 1));
+ }
+ }
+
// Set layer information
inputLayer ->GetOutputSlot(0).SetTensorInfo(inputInfo);
convLayer ->GetOutputSlot(0).SetTensorInfo(outputInfo);
@@ -178,7 +225,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
{
// FIRST NETWORK: Fused
// Construct ArmNN network
- INetworkPtr networkFused = CreatNetwork<Conv2dTest, ArmnnType>(depthwise, false);
+ INetworkPtr networkFused = CreateNetwork<Conv2dTest, ArmnnType>(depthwise, false);
// Create ArmNN runtime
IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
@@ -194,12 +241,26 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
- CHECK(3 == graphFused.GetNumLayers());
- CHECK(CheckSequence(graphFused.cbegin(),
- graphFused.cend(),
- &IsLayerOfType<InputLayer>,
- checkFusedConv2d,
- &IsLayerOfType<OutputLayer>));
+ if (Conv2dTest::isConstTensorAsInputSupported)
+ {
+ CHECK(5 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
+ graphFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ checkFusedConv2d,
+ &IsLayerOfType<OutputLayer>));
+ }
+ else
+ {
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
+ graphFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ checkFusedConv2d,
+ &IsLayerOfType<OutputLayer>));
+ }
// Load network into runtime
NetworkId networkIdentifier;
@@ -227,7 +288,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// SECOND NETWORK: NotFused
// Construct ArmNN network
- INetworkPtr networkNotFused = CreatNetwork<Conv2dTest, ArmnnType>(depthwise, true);
+ INetworkPtr networkNotFused = CreateNetwork<Conv2dTest, ArmnnType>(depthwise, true);
// Create ArmNN runtime
IRuntimePtr runNotFused = IRuntime::Create(IRuntime::CreationOptions()); // default options
@@ -237,14 +298,29 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- CHECK(5 == graphNotFused.GetNumLayers());
- CHECK(CheckSequence(graphNotFused.cbegin(),
- graphNotFused.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<ConvLayerType>,
- &IsLayerOfType<armnn::BatchNormalizationLayer>,
- &IsLayerOfType<armnn::OutputLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ if (Conv2dTest::isConstTensorAsInputSupported)
+ {
+ CHECK(6 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
+ graphNotFused.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<ConvLayerType>,
+ &IsLayerOfType<armnn::BatchNormalizationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
+ else
+ {
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
+ graphNotFused.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<ConvLayerType>,
+ &IsLayerOfType<armnn::BatchNormalizationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
// Load network into runtime
NetworkId networkIdentifierNotFused;
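
A final note on what checkFusedConv2d implies: fusing BatchNormalization into the convolution rewrites the constants, producing new weights and a bias even though the convolution above was created with an empty Optional<ConstTensor>. That is why the fused graph carries two ConstantLayers where the unfused one carries only the weights. The standard per-output-channel folding, sketched with illustrative variable names (the pass implementation is outside this diff):

    // y = gamma * (conv(x, W) + b - mean) / sqrt(var + eps) + beta
    // collapses into a single convolution with:
    float invStd    = 1.0f / std::sqrt(var[c] + epsilon);
    fusedWeights[c] = weights[c] * gamma[c] * invStd;  // scales every tap of output channel c
    fusedBias[c]    = (bias[c] - mean[c]) * gamma[c] * invStd + beta[c];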