author     Sadik Armagan <sadik.armagan@arm.com>  2021-01-11 15:15:01 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>  2021-02-05 12:10:06 +0000
commit     60bb9d80fa6fedfcb51afc0c9a74d6c2948873fd (patch)
tree       9b578645e2b066fa971a88778f003dbbaac5608e
parent     a4533faaeb07151476e074298f3403896f95668b (diff)
download   armnn-60bb9d80fa6fedfcb51afc0c9a74d6c2948873fd.tar.gz

MLCE-326 'Support Dilation in Conv2D in ONNX and Tensorflow Parsers'

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I4a0f07b1e8f80aff0d29405def1f33bde7944e31
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp          74
-rw-r--r--  src/armnnOnnxParser/test/Conv2D.cpp        151
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp             118
-rw-r--r--  src/armnnTfParser/test/Convolution2d.cpp   306
4 files changed, 439 insertions(+), 210 deletions(-)
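
The core of the change is the effective (dilated) kernel extent, filterSize + (dilation - 1) * (filterSize - 1), which now feeds both the SAME-padding and the output-shape calculations in place of the old "dilations must all be 1" check. A minimal standalone C++ sketch of that arithmetic (illustrative names only, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Effective extent of a dilated kernel: a 3x3 kernel with dilation 2 covers 5x5.
    static uint32_t DilatedSize(uint32_t filterSize, uint32_t dilation)
    {
        return filterSize + (dilation - 1) * (filterSize - 1);
    }

    int main()
    {
        // Same numbers as the new ONNX test fixture: 6x6 input, 3x3 kernel,
        // stride 1, dilation 2, VALID (no) padding.
        const uint32_t input = 6, kernel = 3, stride = 1, dilation = 2;
        const uint32_t dilated = DilatedSize(kernel, dilation);   // 5
        const uint32_t output  = 1 + (input - dilated) / stride;  // 2, i.e. a 2x2 output
        std::printf("dilated=%u output=%u\n", dilated, output);
        return 0;
    }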
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 9f5aa1975a..b4e7133239 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -331,22 +331,28 @@ std::string TensorInfoAsString(const TensorInfo& info,
return ss.str();
}
-void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t* paddingFront,
- uint32_t* paddingBack, bool isUpper)
+void CalcPadding(uint32_t inputSize,
+ uint32_t filterSize,
+ uint32_t stride,
+ uint32_t dilation,
+ uint32_t* paddingFront,
+ uint32_t* paddingBack,
+ bool isUpper)
{
uint32_t outputSize = (inputSize + stride - 1) / stride;
- uint32_t temp = (outputSize - 1) * stride + filterSize;
+ uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+ uint32_t temp = (outputSize - 1) * stride + dilatedSize;
*paddingFront = (temp - inputSize) / 2;
*paddingBack = *paddingFront;
if((temp - inputSize) % 2 == 1)
{
if (isUpper)
{
- *paddingBack += 1;
+ *paddingBack += 1;
}
else
{
- *paddingFront += 1;
+ *paddingFront += 1;
}
}
}
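
For SAME-style padding the only change is that the window extent used in the formula is the dilated size. A hedged worked example with the same numbers as the test fixtures (6x6 input, 3x3 kernel, stride 1, dilation 2), not part of the patch:

    // outputSize   = (6 + 1 - 1) / 1        = 6
    // dilatedSize  = 3 + (2 - 1) * (3 - 1)  = 5
    // temp         = (6 - 1) * 1 + 5        = 10
    // paddingFront = (10 - 6) / 2 = 2,  paddingBack = 2  (total padding 4)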
@@ -1025,8 +1031,20 @@ void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescr
auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
uint32_t inputHeight = inputInfo.GetShape()[2];
uint32_t inputWidth = inputInfo.GetShape()[3];
- CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, &desc.m_PadTop, &desc.m_PadBottom, isUpper);
- CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, &desc.m_PadLeft, &desc.m_PadRight, isUpper);
+ CalcPadding(inputHeight,
+ desc.m_PoolHeight,
+ desc.m_StrideY,
+ 1u,
+ &desc.m_PadTop,
+ &desc.m_PadBottom,
+ isUpper);
+ CalcPadding(inputWidth,
+ desc.m_PoolWidth,
+ desc.m_StrideX,
+ 1u,
+ &desc.m_PadLeft,
+ &desc.m_PadRight,
+ isUpper);
}
}
else
@@ -1327,25 +1345,6 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
- std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node, "dilations");
- if (!dilations.empty())
- {
- std::stringstream ss;
- ss << "[ ";
- for (auto dilation : dilations)
- {
- ss << dilation << ", ";
- if (dilation != 1u)
- {
- ss << "... ]";
- throw ParseException(
- fmt::format("ArmNN only supports Convolution layers with dilations [1,1], and node '{}' "
- "has dilatation {} {}",
- node.name(), ss.str(), CHECK_LOCATION().AsString()));
- }
- }
- }
-
Convolution2dDescriptor desc;
desc.m_BiasEnabled = false;
@@ -1361,6 +1360,13 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
desc.m_StrideY = strides[0];
}
+ std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node, "dilations");
+ if(!dilations.empty())
+ {
+ desc.m_DilationX = dilations[1];
+ desc.m_DilationY = dilations[0];
+ }
+
std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");
//Check new padding version first
if(pads.empty())
@@ -1404,8 +1410,20 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
weightHeight = kernel_shape[0];
weightWidth = kernel_shape[1];
}
- CalcPadding(inputHeight, weightHeight, desc.m_StrideY, &desc.m_PadTop, &desc.m_PadBottom, isUpper);
- CalcPadding(inputWidth, weightWidth, desc.m_StrideX, &desc.m_PadLeft, &desc.m_PadRight, isUpper);
+ CalcPadding(inputHeight,
+ weightHeight,
+ desc.m_StrideY,
+ desc.m_DilationY,
+ &desc.m_PadTop,
+ &desc.m_PadBottom,
+ isUpper);
+ CalcPadding(inputWidth,
+ weightWidth,
+ desc.m_StrideX,
+ desc.m_DilationX,
+ &desc.m_PadLeft,
+ &desc.m_PadRight,
+ isUpper);
}
}
else
diff --git a/src/armnnOnnxParser/test/Conv2D.cpp b/src/armnnOnnxParser/test/Conv2D.cpp
index da67985107..a38cc192ed 100644
--- a/src/armnnOnnxParser/test/Conv2D.cpp
+++ b/src/armnnOnnxParser/test/Conv2D.cpp
@@ -438,6 +438,146 @@ struct Conv2DDimReducingFixture : public armnnUtils::ParserPrototxtFixture<armn
}
};
+struct Conv2DwithDilationFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ Conv2DwithDilationFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 6
+ }
+ dim {
+ dim_value: 6
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ dims: 1
+ dims: 3
+ dims: 3
+ data_type: 1
+ float_data: 2
+ float_data: 1
+ float_data: 0
+ float_data: 6
+ float_data: 2
+ float_data: 1
+ float_data: 4
+ float_data: 1
+ float_data: 2
+ name: "Weight"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 3
+ ints: 3
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "auto_pad"
+ s: "VALID"
+ type: STRING
+ }
+ attribute {
+ name: "group"
+ i: 1
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 2
+ ints: 2
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
BOOST_FIXTURE_TEST_CASE(ValidConvTest, SimpleConv2DFixture)
{
RunTest<4>({{"Input", {1.0, 2.0, 3.0,
@@ -466,4 +606,15 @@ BOOST_FIXTURE_TEST_CASE(ValidConvDimReducTest, Conv2DDimReducingFixture)
1, 2, 3, 4}}});
}
+BOOST_FIXTURE_TEST_CASE(ValidConvWithDilationTest, Conv2DwithDilationFixture)
+{
+ RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}}},
+ {{"Output", {39.0, 58.0, 153.0, 172.0 }}});
+}
+
BOOST_AUTO_TEST_SUITE_END()
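
The expected values can be checked by hand: with dilation 2 the 3x3 kernel samples rows and columns {0, 2, 4} for the top-left output element. An illustrative check of the first value (not part of the test):

    // Input rows 0, 2 and 4 at columns 0, 2, 4 are all (1, 3, 5); the weights are
    //   2 1 0
    //   6 2 1
    //   4 1 2
    // output(0,0) = 2*1 + 1*3 + 0*5
    //             + 6*1 + 2*3 + 1*5
    //             + 4*1 + 1*3 + 2*5 = 39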
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index f926013faa..d13a277924 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -423,28 +423,29 @@ const std::list<std::string> ITfParser::TfParserImpl::m_ControlInputs = {
"Assert"
};
-inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
- uint32_t filterSize, bool samePadding,
- uint32_t* paddingFront, uint32_t* paddingBack) {
- *paddingFront = 0;
- *paddingBack = 0;
-
- if (samePadding) {
+void CalcPadding(uint32_t inputSize,
+ uint32_t filterSize,
+ uint32_t stride,
+ uint32_t dilation,
+ uint32_t& paddingFront,
+ uint32_t& paddingBack,
+ bool samePadding)
+{
+ paddingFront = 0;
+ paddingBack = 0;
+ if (samePadding)
+ {
uint32_t outputSize = (inputSize + stride - 1) / stride;
- uint32_t temp = (outputSize - 1) * stride + filterSize;
- if (temp > inputSize) {
- *paddingFront = (temp - inputSize) / 2;
- *paddingBack = (temp - inputSize) - *paddingFront;
+ uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+ uint32_t temp = (outputSize - 1) * stride + dilatedSize;
+ if (temp > inputSize)
+ {
+ paddingFront = (temp - inputSize) / 2;
+ paddingBack = (temp - inputSize) - paddingFront;
}
}
}
-void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
- bool samePadding)
-{
- CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
-}
-
/// An Abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
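
The TensorFlow parser now shares the same dilation-aware padding formula as the ONNX parser; the old CalculateSamePadding wrapper is folded into a single CalcPadding that returns the padding through references. A minimal usage sketch with illustrative values only:

    uint32_t padTop = 0, padBottom = 0;
    CalcPadding(/*inputSize*/ 6, /*filterSize*/ 3, /*stride*/ 1, /*dilation*/ 2,
                padTop, padBottom, /*samePadding*/ true);
    // padTop == 2 and padBottom == 2: the dilated 5-element window needs 4 extra rows.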
@@ -1229,22 +1230,6 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::Node
std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
- // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
- std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
- if (!dilations.empty())
- {
- for (auto dilation : dilations)
- {
- if (dilation != 1u)
- {
- throw ParseException(
- fmt::format("ArmNN only supports Convolution layers with dilations [1,1,1,1] for {} {}",
- nodeDef.name(),
- CHECK_LOCATION().AsString()));
- }
- }
- }
-
Convolution2dDescriptor desc;
desc.m_BiasEnabled = false;
@@ -1259,6 +1244,13 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::Node
desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
+ std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
+ if (!dilations.empty())
+ {
+ desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
+ desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
+ }
+
uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
@@ -1296,22 +1288,24 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::Node
if (paddingString == "SAME")
{
padding = true;
-
- outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
- static_cast<float>(desc.m_StrideY)));
- outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
- static_cast<float>(desc.m_StrideX)));
}
else if (paddingString == "VALID")
{
padding = false;
-
- outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
- static_cast<float>(desc.m_StrideY)));
- outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
- static_cast<float>(desc.m_StrideX)));
}
+ CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
+ CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
+
+ // Calculate output height and width
+ unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
+ unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
+ outputWidth = 1 + (readWidth / desc.m_StrideX);
+
+ unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
+ unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
+ outputHeight = 1 + (readHeight / desc.m_StrideY);
+
switch (dataLayout)
{
case DataLayout::NHWC:
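
Rather than hard-coding separate SAME and VALID output formulas, the output shape is now derived from the computed padding and the dilated filter extent, so both padding modes go through the same expression. A hedged worked example for the height dimension (6x6 input, 3x3 kernel, stride 1, dilation 2):

    // dilatedFilterHeight = 3 + (2 - 1) * (3 - 1) = 5
    // VALID: padTop = padBottom = 0 -> readHeight = 6 - 5 = 1        -> outputHeight = 1 + 1/1 = 2
    // SAME:  padTop = padBottom = 2 -> readHeight = (6 + 4) - 5 = 5  -> outputHeight = 1 + 5/1 = 6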
@@ -1331,9 +1325,6 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::Node
break;
}
- CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
- CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
-
IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
weightTensor,
EmptyOptional(),
@@ -1382,6 +1373,12 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorf
desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
+ std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
+ if (!dilations.empty())
+ {
+ desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
+ desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
+ }
uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
@@ -1416,22 +1413,24 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorf
if (paddingString == "SAME")
{
padding = true;
-
- outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
- static_cast<float>(desc.m_StrideY)));
- outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
- static_cast<float>(desc.m_StrideX)));
}
else if (paddingString == "VALID")
{
padding = false;
-
- outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
- static_cast<float>(desc.m_StrideY)));
- outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
- static_cast<float>(desc.m_StrideX)));
}
+ CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
+ CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
+
+ // Calculate output height and width
+ unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
+ unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
+ outputWidth = 1 + (readWidth / desc.m_StrideX);
+
+ unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
+ unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
+ outputHeight = 1 + (readHeight / desc.m_StrideY);
+
switch (dataLayout)
{
case DataLayout::NHWC:
@@ -1451,9 +1450,6 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorf
break;
}
- CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
- CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
-
IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
weightTensor,
EmptyOptional(),
@@ -3094,9 +3090,9 @@ ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePooling2d(const tensorflow::N
break;
}
- CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
+ CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX, 1u,
pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
- CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
+ CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY, 1u,
pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
diff --git a/src/armnnTfParser/test/Convolution2d.cpp b/src/armnnTfParser/test/Convolution2d.cpp
index cf714894a2..c58615f990 100644
--- a/src/armnnTfParser/test/Convolution2d.cpp
+++ b/src/armnnTfParser/test/Convolution2d.cpp
@@ -37,7 +37,22 @@ struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfPa
" i: " + std::to_string(stride) + " \n");
}
- std::string dilationString = std::to_string(dilation);
+ std::string dilationString;
+ if (dataLayout == "NHWC")
+ {
+ dilationString.append(" i: 1 \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: 1 \n");
+ }
+ else // dataLayout == "NCHW"
+ {
+ dilationString.append(" i: 1 \n"
+ " i: 1 \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: " + std::to_string(dilation) + " \n");
+ }
+
m_Prototext = "node { \n"
" name: \"graphInput\" \n"
" op: \"Placeholder\" \n"
@@ -130,16 +145,10 @@ struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfPa
m_Prototext.append(" attr { \n"
" key: \"dilations\" \n"
" value { \n"
- " list { \n"
- " i: 1 \n"
- " i: ");
- m_Prototext.append(dilationString);
- m_Prototext.append(" \n"
- " i: ");
+ " list { \n");
m_Prototext.append(dilationString);
- m_Prototext.append(" \n"
- " i: 1 \n"
- " } \n"
+
+ m_Prototext.append(" } \n"
" } \n"
" } \n");
}
@@ -167,7 +176,6 @@ struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfPa
}
};
-
struct Convolution2dNhwcSameFixture : Convolution2dFixture
{
Convolution2dNhwcSameFixture() : Convolution2dFixture("NHWC", "SAME", 1){}
@@ -262,118 +270,174 @@ BOOST_FIXTURE_TEST_CASE(ParseConv2dDilation1Nchw, Convolution2dDilation1NchwFixt
RunTest<4>({1, 2, 3, 4, 5, 6}, {2, 4, 4, 6.5f, 10 , 8.5f});
}
+struct Convolution2dDilationFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit Convolution2dDilationFixture(const std::string& dataLayout, const std::string& paddingType)
+ : Convolution2dDilationFixture(dataLayout, paddingType, 1)
+ {}
+
+ explicit Convolution2dDilationFixture(const std::string& dataLayout, const std::string& paddingType,
+ int stride, int dilation = 0)
+ {
+ std::string strideString;
+ if (dataLayout == "NHWC")
+ {
+ strideString.append(" i: 1 \n"
+ " i: " + std::to_string(stride) + " \n"
+ " i: " + std::to_string(stride) + " \n"
+ " i: 1 \n");
+ }
+ else // dataLayout == "NCHW"
+ {
+ strideString.append(" i: 1 \n"
+ " i: 1 \n"
+ " i: " + std::to_string(stride) + " \n"
+ " i: " + std::to_string(stride) + " \n");
+ }
+
+ std::string dilationString;
+ if (dataLayout == "NHWC")
+ {
+ dilationString.append(" i: 1 \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: 1 \n");
+ }
+ else // dataLayout == "NCHW"
+ {
+ dilationString.append(" i: 1 \n"
+ " i: 1 \n"
+ " i: " + std::to_string(dilation) + " \n"
+ " i: " + std::to_string(dilation) + " \n");
+ }
-BOOST_AUTO_TEST_CASE(ParseConv2dDilation2)
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node { \n"
+ " name: \"Const_1\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " tensor_content: \"\\001\\000\\000?\\000\\000\\000?\\001\\000\\000?\" \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"potato\" \n"
+ " op: \"Conv2D\" \n"
+ " input: \"graphInput\" \n"
+ " input: \"Const_1\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"data_format\" \n"
+ " value { \n"
+ " s: \"";
+ m_Prototext.append(dataLayout);
+ m_Prototext.append("\"\n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"padding\" \n"
+ " value { \n"
+ " s: \"");
+ m_Prototext.append(paddingType);
+ m_Prototext.append("\"\n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"strides\" \n"
+ " value { \n"
+ " list { \n");
+ m_Prototext.append(strideString);
+
+ m_Prototext.append(" } \n"
+ " } \n"
+ " } \n");
+
+ if (dilation > 0)
+ {
+ m_Prototext.append(" attr { \n"
+ " key: \"dilations\" \n"
+ " value { \n"
+ " list { \n");
+ m_Prototext.append(dilationString);
+
+ m_Prototext.append(" } \n"
+ " } \n"
+ " } \n");
+ }
+ m_Prototext.append(" attr { \n"
+ " key: \"use_cudnn_on_gpu\" \n"
+ " value { \n"
+ " b: false \n"
+ " } \n"
+ " } \n"
+ "} \n");
+
+        // Input is a single-batch, single-channel 6x6 tensor (NCHW).
+        std::array<unsigned int, 4> dims = { 1u, 1u, 6u, 6u };
+
+ SetupSingleInputSingleOutput(armnn::TensorShape(4, dims.data()), "graphInput", "potato");
+ }
+};
+
+struct Convolution2dDilation2NchwValidFixture : Convolution2dDilationFixture
+{
+ Convolution2dDilation2NchwValidFixture() : Convolution2dDilationFixture("NCHW", "VALID", 1, 2){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2dDilation2NchwValid, Convolution2dDilation2NchwValidFixture)
{
- const char* prototext = ""
- "node {\n"
- " name: \"graphInput\"\n"
- " op: \"Placeholder\"\n"
- " attr {\n"
- " key: \"dtype\"\n"
- " value {\n"
- " type: DT_FLOAT\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"shape\"\n"
- " value {\n"
- " shape {\n"
- " }\n"
- " }\n"
- " }\n"
- "}\n"
- "node {\n"
- " name: \"Const_1\"\n"
- " op: \"Const\"\n"
- " attr {\n"
- " key: \"dtype\"\n"
- " value {\n"
- " type: DT_FLOAT\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"value\"\n"
- " value {\n"
- " tensor {\n"
- " dtype: DT_FLOAT\n"
- " tensor_shape {\n"
- " dim {\n"
- " size: 1\n"
- " }\n"
- " dim {\n"
- " size: 3\n"
- " }\n"
- " dim {\n"
- " size: 1\n"
- " }\n"
- " dim {\n"
- " size: 1\n"
- " }\n"
- " }\n"
- " tensor_content: \"\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?\"\n"
- " }\n"
- " }\n"
- " }\n"
- "}\n"
- "node {\n"
- " name: \"potato\"\n"
- " op: \"Conv2D\"\n"
- " input: \"graphInput\"\n"
- " input: \"Const_1\"\n"
- " attr {\n"
- " key: \"T\"\n"
- " value {\n"
- " type: DT_FLOAT\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"data_format\"\n"
- " value {\n"
- " s: \"NHWC\"\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"padding\"\n"
- " value {\n"
- " s: \"SAME\"\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"strides\"\n"
- " value {\n"
- " list {\n"
- " i: 1\n"
- " i: 1\n"
- " i: 1\n"
- " i: 1\n"
- " }\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"dilations\"\n"
- " value {\n"
- " list {\n"
- " i: 1\n"
- " i: 2\n"
- " i: 2\n"
- " i: 1\n"
- " }\n"
- " }\n"
- " }\n"
- " attr {\n"
- " key: \"use_cudnn_on_gpu\"\n"
- " value {\n"
- " b: false\n"
- " }\n"
- " }\n"
- "}\n";
-
- std::map<std::string, armnn::TensorShape> inputShapes;
- armnn::TensorShape tensorShape = { 1, 3, 3, 1 };
- inputShapes["graphInput"] = tensorShape;
- armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
- BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }), armnn::ParseException);
+ RunTest<4>({1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0, 10.0, 11.0, 12.0},
+ {1.5f, 3.0f, 4.5f, 6.0f, 7.5f, 9.0f, 10.5f, 12.f, 13.5f, 15.0f, 16.5f, 18.0f});
}