path: root/src
author    jimfly01 <jim.flynn@arm.com>    2018-12-04 10:09:52 +0000
committer jimfly01 <jim.flynn@arm.com>    2018-12-05 12:15:48 +0000
commit    f6ba747c0802d87ba30aecd598f0603f9bd18576 (patch)
tree      df892f37ed1edc13e9ed05d9ac72f67a1fd622a3 /src
parent    2135015779092e259ad5d5df185eda0c34b56359 (diff)
download  armnn-f6ba747c0802d87ba30aecd598f0603f9bd18576.tar.gz
IVGCVSW-2296 Add ParsePad method to TfParser
* Also added unit test armnnTfParser/test/Pad.cpp
* Added missing 'Pad' entry to GetLayerTypeAsCString(LayerType)
* Fixed RefLayerSupport::IsPadSupported so that it now returns true for supported data types
* Small fix in Optimize: only resolve the stringstream to a string once

Change-Id: Ieaa1886858a48cd761ac5f30454f73e44bdd4b8f
Diffstat (limited to 'src')
-rw-r--r--src/armnn/InternalTypes.cpp1
-rw-r--r--src/armnn/Network.cpp5
-rw-r--r--src/armnnTfParser/TfParser.cpp140
-rw-r--r--src/armnnTfParser/TfParser.hpp3
-rw-r--r--src/armnnTfParser/test/Pad.cpp107
-rw-r--r--src/backends/reference/RefLayerSupport.cpp7
6 files changed, 252 insertions, 11 deletions
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 8a4f892583..cbf6f8a257 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -48,6 +48,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Splitter: return "Splitter";
case LayerType::StridedSlice: return "StridedSlice";
case LayerType::Subtraction: return "Subtraction";
+ case LayerType::Pad: return "Pad";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 2cb3edcf3f..0cf0ed36fd 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -272,9 +272,10 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
<< " for data type " << GetDataTypeName(dataType)
<< " (reason: " << reasonIfUnsupported
<< "), falling back to the next backend.";
- BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
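+ // Resolve the stringstream to a std::string once; the result is used both for logging and for errMessages.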
+ std::string wMsg = warningMsg.str();
+ BOOST_LOG_TRIVIAL(warning) << wMsg;
if (errMessages) {
- errMessages.value().push_back(warningMsg.str());
+ errMessages.value().push_back(wMsg);
}
}
else
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 09769ecf6e..4ddcdce1c7 100644
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -370,6 +370,7 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "AvgPool", &TfParser::ParseAvgPool },
{ "Maximum", &TfParser::ParseMaximum },
{ "Minimum", &TfParser::ParseMinimum },
+ { "Pad", &TfParser::ParsePad },
};
ITfParser* ITfParser::CreateRaw()
@@ -986,15 +987,17 @@ template<typename Type>
bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
{
auto it = m_ParsedTfOperations.find(nodeName);
- if (it == m_ParsedTfOperations.end() ||
- dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) == nullptr)
+ if (it == m_ParsedTfOperations.end())
{
return false;
}
- else
- {
- return true;
- }
+ return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
+}
+
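+// Overload of HasParsedConstTensor: checks a ParsedTfOperation pointer directly instead of looking it up by name.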
+template<typename Type>
+bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
+{
+ return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
}
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
@@ -1479,6 +1482,131 @@ ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
+ const TensorInfo& inputTensorInfo,
+ const std::string& nodeName)
+{
+ unsigned int rank = paddingTensor.GetShape()[0];
+ unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
+ if (rank != expectedRank)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
+ % expectedRank
+ % rank
+ % nodeName
+ % CHECK_LOCATION().AsString()));
+ }
+ unsigned int second = paddingTensor.GetShape()[1];
+ if (second != 2)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
+ % rank
+ % second
+ % nodeName
+ % CHECK_LOCATION().AsString()));
+ }
+ return rank;
+}
+
+TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList)
+{
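+ // Illustrative example: an input shape of [1, 2, 2, 2] with a padList of
+ // {{0,0}, {1,1}, {1,1}, {0,0}} gives an output shape of [1, 4, 4, 2].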
+ unsigned int numDims = inputTensorInfo.GetNumDimensions();
+ std::vector<unsigned int> outDims;
+ for (unsigned int i = 0; i < numDims; ++i)
+ {
+ unsigned int dimSize = inputTensorInfo.GetShape()[i];
+ const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
+ dimSize += dimPadding.first;
+ dimSize += dimPadding.second;
+ outDims.push_back(dimSize);
+ }
+ TensorInfo paddedTensorInfo = inputTensorInfo;
+ unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
+ paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
+ return paddedTensorInfo;
+}
+
+ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
+ const tensorflow::GraphDef& graphDef)
+{
+ // input consists of:
+ // input[0] the tensor which will be padded
+ // input[1] the tensor holding the padding values
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
+ IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
+ TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
+ if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Pad with constant padding. "
+ "Input %1%. Node %2% %3%")
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+ ParsedConstTfOperation<int32_t>* paddingTensorOp =
+ boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
+
+ std::vector<int32_t> paddingTensorData;
+ ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(false, paddingTensorData);
+ // paddings is an integer tensor with shape [n, 2], where n is the rank of the
+ // tensor being padded and must match the rank of the input tensor.
+ // For each dimension D of the input, paddings[D, 0] indicates how many values to add
+ // before the contents of the tensor in that dimension, and paddings[D, 1] indicates
+ // how many values to add after the contents of the tensor in that dimension.
+ // This needs to be translated into a padList for ACL.
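+ // For example, paddings [[0,0],[1,1],[1,1],[0,0]] arrive flattened as
+ // {0,0,1,1,1,1,0,0}: element [i,0] is at index i*2 and element [i,1] at i*2+1.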
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
+ for (unsigned int i = 0; i < rank; ++i)
+ {
+ std::pair<unsigned int, unsigned int> paddingForDim;
+ for (unsigned int j = 0; j < 2; j++)
+ {
+ unsigned int index = (i * 2) + j;
+ int paddingAmount = paddingTensorData[index];
+ // make sure we can cast to an unsigned value
+ if (paddingAmount < 0)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
+ % paddingAmount
+ % i
+ % j
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+ if (j == 0)
+ {
+ paddingForDim.first = static_cast<unsigned int>(paddingAmount);
+ }
+ else
+ {
+ paddingForDim.second = static_cast<unsigned int>(paddingAmount);
+ }
+ }
+ padList.push_back(paddingForDim);
+ }
+ PadDescriptor padDescriptor(padList);
+ IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
+ previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
+ // Use the padding to calculate the new output tensor shape
+ TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+ return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+}
+
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 7abf783c38..1c29ce2717 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -124,6 +124,8 @@ private:
/// Checks if there is a pre-parsed const tensor available with the given name and Type.
template<typename Type>
bool HasParsedConstTensor(const std::string & nodeName) const;
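+    /// Overload of the above that checks an already-parsed operation directly.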
+ template<typename Type>
+ bool HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const;
ParsedTfOperationPtr ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
@@ -153,6 +155,7 @@ private:
armnn::PoolingAlgorithm pooltype);
ParsedTfOperationPtr ParseMaximum(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParseMinimum(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
+ ParsedTfOperationPtr ParsePad(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef& nodeDef, armnn::ActivationDescriptor& desc);
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd = false);
ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef& nodeDef);
diff --git a/src/armnnTfParser/test/Pad.cpp b/src/armnnTfParser/test/Pad.cpp
new file mode 100644
index 0000000000..8bfe970dfa
--- /dev/null
+++ b/src/armnnTfParser/test/Pad.cpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct PadFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ PadFixture() {
+ m_Prototext = "node {\n"
+ " name: \"input\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"shape\"\n"
+ " value {\n"
+ " shape {\n"
+ " dim {\n"
+ " size: -1\n"
+ " }\n"
+ " dim {\n"
+ " size: 2\n"
+ " }\n"
+ " dim {\n"
+ " size: 2\n"
+ " }\n"
+ " dim {\n"
+ " size: 2\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"Pad/paddings\"\n"
+ " op: \"Const\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_INT32\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"value\"\n"
+ " value {\n"
+ " tensor {\n"
+ " dtype: DT_INT32\n"
+ " tensor_shape {\n"
+ " dim {\n"
+ " size: 4\n"
+ " }\n"
+ " dim {\n"
+ " size: 2\n"
+ " }\n"
+ " }\n"
+ " tensor_content: \"\\000\\000\\000\\000\\000\\000\\000\\000"
+ "\\001\\000\\000\\000\\001\\000\\000\\000"
+ "\\001\\000\\000\\000\\001\\000\\000\\000"
+ "\\000\\000\\000\\000\\000\\000\\000\\000\"\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"Pad\"\n"
+ " op: \"Pad\"\n"
+ " input: \"input\"\n"
+ " input: \"Pad/paddings\"\n"
+ " attr {\n"
+ " key: \"T\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"Tpaddings\"\n"
+ " value {\n"
+ " type: DT_INT32\n"
+ " }\n"
+ " }\n"
+ "}";
+
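+        // The tensor_content above is the little-endian int32 encoding of the
+        // paddings [[0,0],[1,1],[1,1],[0,0]], so the [1,2,2,2] input is padded to [1,4,4,2].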
+ SetupSingleInputSingleOutput({1, 2, 2, 2}, "input", "Pad");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePad, PadFixture)
+{
+ RunTest<4>({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f },
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 5.0f, 6.0f, 7.0f, 8.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index fffea587a0..3b49fa0e51 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -400,11 +400,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input);
ignore_unused(output);
ignore_unused(descriptor);
- ignore_unused(reasonIfUnsupported);
- return false;
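+ // Pad now has a reference implementation; report support for the data types
+ // the reference backend handles (the two TrueFunc arguments cover float and uint8).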
+ return IsSupportedForDataTypeRef(reasonIfUnsupported,
+ input.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
}
bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,