aboutsummaryrefslogtreecommitdiff
path: root/src/armnnTfParser
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
committertelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
commitc577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
treebd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnnTfParser
parent4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
downloadarmnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnnTfParser')
-rw-r--r--src/armnnTfParser/README.md2
-rw-r--r--src/armnnTfParser/TensorFlowSupport.md9
-rw-r--r--src/armnnTfParser/TfParser.cpp927
-rw-r--r--src/armnnTfParser/TfParser.hpp48
-rw-r--r--src/armnnTfParser/test/Activations.cpp6
-rw-r--r--src/armnnTfParser/test/Addition.cpp2
-rw-r--r--src/armnnTfParser/test/BiasAdd.cpp2
-rw-r--r--src/armnnTfParser/test/BroadcastForAdd.cpp6
-rw-r--r--src/armnnTfParser/test/Concat.cpp2
-rw-r--r--src/armnnTfParser/test/ConcatOfConcats.cpp2
-rw-r--r--src/armnnTfParser/test/Constant.cpp20
-rw-r--r--src/armnnTfParser/test/Convolution2d.cpp15
-rw-r--r--src/armnnTfParser/test/DepthwiseConvolution2d.cpp2
-rw-r--r--src/armnnTfParser/test/FullyConnected.cpp38
-rw-r--r--src/armnnTfParser/test/FusedBatchNorm.cpp6
-rw-r--r--src/armnnTfParser/test/Identity.cpp6
-rw-r--r--src/armnnTfParser/test/LocalResponseNormalization.cpp3
-rw-r--r--src/armnnTfParser/test/MaximumForLeakyRelu.cpp169
-rw-r--r--src/armnnTfParser/test/MultiOutput.cpp6
-rw-r--r--src/armnnTfParser/test/Multiplication.cpp4
-rw-r--r--src/armnnTfParser/test/PassThru.cpp4
-rw-r--r--src/armnnTfParser/test/Pooling.cpp3
-rw-r--r--src/armnnTfParser/test/Reshape.cpp3
-rw-r--r--src/armnnTfParser/test/ResizeBilinear.cpp6
-rw-r--r--src/armnnTfParser/test/Shape.cpp7
-rw-r--r--src/armnnTfParser/test/Softmax.cpp2
-rw-r--r--src/armnnTfParser/test/Squeeze.cpp3
-rw-r--r--src/armnnTfParser/test/TestDependencies.cpp26
-rw-r--r--src/armnnTfParser/test/TestMultiInputsOutputs.cpp10
29 files changed, 972 insertions, 367 deletions
diff --git a/src/armnnTfParser/README.md b/src/armnnTfParser/README.md
index 49c46086ed..e4aed65b94 100644
--- a/src/armnnTfParser/README.md
+++ b/src/armnnTfParser/README.md
@@ -1,5 +1,5 @@
# The Arm NN TensorFlow parser
-`armnnTfParser` is a library for loading Neural Networks defined by TensorFlow protobuf files into the Arm NN runtime.
+`armnnTfParser` is a library for loading neural networks defined by TensorFlow protobuf files into the Arm NN runtime.
For more information about the TensorFlow operators that are supported, and the networks that have been tested, see [TensorFlowSupport.md](./TensorFlowSupport.md) \ No newline at end of file
diff --git a/src/armnnTfParser/TensorFlowSupport.md b/src/armnnTfParser/TensorFlowSupport.md
index ad8efa89d1..89c47377f6 100644
--- a/src/armnnTfParser/TensorFlowSupport.md
+++ b/src/armnnTfParser/TensorFlowSupport.md
@@ -104,7 +104,16 @@ The parser only supports `ResizeMethod.BILINEAR` with `align_corners=False`. See
The parser only supports 2D inputs and does not support selecting the `softmax` dimension. See the TensorFlow [softmax documentation](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) for more information.
+**maximum**
+Supported where maximum is used in one of the following ways:
+
+* max(mul(a, x), x)
+* max(mul(x, a), x)
+* max(x, mul(a, x))
* max(x, mul(x, a))
+
+This is interpreted as an ActivationLayer with a LeakyRelu activation function. Any other usage of max will currently cause an unsupported error. See the TensorFlow [maximum documentation](https://www.tensorflow.org/api_docs/python/tf/maximum) for more information.
## Tested networks
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 834c0dd41b..5bc2ad7d18 100644
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -12,6 +12,7 @@
#include <GraphTopologicalSort.hpp>
#include <Permute.hpp>
+#include <VerificationHelpers.hpp>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
@@ -47,13 +48,13 @@ const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
const std::string& name)
{
- // Add swizzle layer
+ // Adds swizzle layer.
IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
- // Connect intput to swizzle layer
+ // Connects intput to swizzle layer.
input.Connect(layer->GetInputSlot(0));
- // Setup swizzled output
+ // Sets up swizzled output.
const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
layer->GetOutputSlot(0).SetTensorInfo(outInfo);
@@ -63,13 +64,13 @@ IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const
IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
const std::string& name)
{
- // Add swizzle layer
+ // Adds swizzle layer.
IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);
- // Connect swizzledInput to layer
+ // Connects swizzledInput to layer.
swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));
- // Add deswizzle layer
+ // Adds deswizzle layer.
IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
"deswizzle_for-" + name);
@@ -92,19 +93,27 @@ void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
}
else
{
- throw ParseException(boost::str(boost::format(
- "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
- "but found %4% instead")
- % attribName
- % nodeDef.name()
- % static_cast<int>(expectedValueCase)
- % static_cast<int>(attrValue.value_case())));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
+ "but found %4% instead %5%")
+ % attribName
+ % nodeDef.name()
+ % static_cast<int>(expectedValueCase)
+ % static_cast<int>(attrValue.value_case())
+ % CHECK_LOCATION().AsString()));
}
}
else
{
- throw ParseException(boost::str(boost::format("Could not find required attribute %1% in node %2%")
- % attribName % nodeDef.name()));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Could not find required attribute %1% in node %2% %3%")
+ % attribName
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
}
@@ -124,13 +133,16 @@ void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
}
else
{
- throw ParseException(boost::str(boost::format(
- "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
- "but found %4% instead")
- % attribName
- % nodeDef.name()
- % static_cast<int>(expectedValueCase)
- % static_cast<int>(attrValue.value_case())));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
+ "but found %4% instead %5%")
+ % attribName
+ % nodeDef.name()
+ % static_cast<int>(expectedValueCase)
+ % static_cast<int>(attrValue.value_case())
+ % CHECK_LOCATION().AsString()));
}
}
}
@@ -233,11 +245,16 @@ TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& t
{
if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
{
- throw ParseException("At most one component of shape can be -1");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "At most one component of shape can be -1 %1%")
+ % CHECK_LOCATION().AsString()));
}
- auto targetNumElements = boost::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
- -1, std::multiplies<int32_t>()));
+ auto targetNumElements =
+ boost::numeric_cast<unsigned int>(
+ std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
}
@@ -248,7 +265,7 @@ TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& t
return reshapeInfo;
}
-// We need the input0Slot to guide the reshape for input1Slot
+// We need the input0Slot to guide the reshape for input1Slot.
IOutputSlot* BroadcastForAddandMul(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC, INetwork& m_Network,
const tensorflow::NodeDef& nodeDef)
{
@@ -284,13 +301,44 @@ OutputId ParseOutputId(const std::string & name)
int n = std::stoi(name.substr(colonPos+1));
if (n<0 || n>100)
{
- throw ParseException("Output tensor id is out of range for "+name);
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Output tensor id is out of range for %1% %2%")
+ % name
+ % CHECK_LOCATION().AsString()));
}
outputNum = static_cast<unsigned int>(n);
}
return OutputId(name.substr(0,colonPos),outputNum);
}
+#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
+ if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
+ { \
+ throw ParseException( \
+ boost::str( \
+ boost::format( \
+ "Unsupported data format %1% passed for %2% node %3%. " \
+ "Only NHWC and NCHW supported %4%") \
+ % FORMAT \
+ % NODE_TYPE \
+ % NODE_DEF.name() \
+ % CHECK_LOCATION().AsString())); \
+ }
+
+#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
+ if(PADDING != "SAME" && PADDING != "VALID" ) \
+ { \
+ throw ParseException( \
+ boost::str( \
+ boost::format( \
+ "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
+ % PADDING \
+ % NODE_DEF.name() \
+ % CHECK_LOCATION().AsString())); \
+ } \
+
} // namespace
const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
@@ -318,6 +366,7 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "Tanh", &TfParser::ParseTanh },
{ "MaxPool", &TfParser::ParseMaxPool },
{ "AvgPool", &TfParser::ParseAvgPool },
+ { "Maximum", &TfParser::ParseMaximum },
};
ITfParser* ITfParser::CreateRaw()
@@ -402,13 +451,18 @@ public:
IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
{
BOOST_ASSERT(m_Layer);
- // Assume one-to-one mapping between Tf and armnn output slots.
+ // Assumes one-to-one mapping between Tf and armnn output slots.
unsigned int armnnOutputSlotIdx = tfOutputIndex;
if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
{
throw ParseException(
- boost::str(boost::format("The requested output slot #%1% "
- "for %2% does not exist") % armnnOutputSlotIdx % m_Layer->GetName()));
+ boost::str(
+ boost::format(
+ "The requested output slot #%1% "
+ "for %2% does not exist %3%")
+ % armnnOutputSlotIdx
+ % m_Layer->GetName()
+ % CHECK_LOCATION().AsString()));
}
return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
}
@@ -417,7 +471,7 @@ protected:
IConnectableLayer* m_Layer;
};
-/// A SingleLayerParsedTfOperation for deferred layer creation
+/// A SingleLayerParsedTfOperation for deferred layer creation.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
@@ -455,7 +509,13 @@ const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeD
if (nodeDef->input_size() != 1)
{
- throw ParseException("Identity node does not have correct amount of inputs!");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Identity node should have a single input! %1% has %2% inputs %3%")
+ % nodeDef->name()
+ % nodeDef->input_size()
+ % CHECK_LOCATION().AsString()));
}
auto it = m_NodesByName.find(nodeDef->input(0));
@@ -466,7 +526,12 @@ const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeD
}
else
{
- throw ParseException("Cannot find what the Identity node is linked to!");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Cannot find what the Identity node %1% is linked to! %2%")
+ % nodeDef->name()
+ % CHECK_LOCATION().AsString()));
}
}
@@ -489,15 +554,25 @@ TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
{
throw ParseException(
- "Node '" + nodeDef.name() + "' has Control Input '" + nodeDef.input(j) + "' which is unsupported.");
+ boost::str(
+ boost::format(
+ "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
+ % nodeDef.name()
+ % nodeDef.input(j)
+ % j
+ % CHECK_LOCATION().AsString()));
}
auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
if (inputIt == m_NodesByName.end())
{
throw ParseException(
- "Can't find node '" + nodeDef.input(j) +
- "', which is listed as an input of '" + nodeDef.name() + "'");
+ boost::str(
+ boost::format(
+ "Can't find node '%1%', which is listed as an input of '%2%' %3%")
+ % nodeDef.input(j)
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
}
@@ -509,22 +584,33 @@ std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
std::size_t expectedNumInputs)
{
- // Fetch the tensorflow nodes connected as inputs and validate the size.
+ // Fetches the tensorflow nodes connected as inputs and validate the size.
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
const std::size_t numInputs = nodes.size();
if (numInputs != expectedNumInputs)
{
- throw ParseException(boost::str(boost::format("Unexpected number of inputs for node %1%. "
- "Expected %2%, found %3%") % nodeDef.name() % expectedNumInputs % numInputs));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
+ % nodeDef.name()
+ % expectedNumInputs
+ % numInputs
+ % CHECK_LOCATION().AsString()));
}
- // Fetch the corresponding ParsedTfOperation operations
+ // Fetches the corresponding ParsedTfOperation operations
std::vector<OutputOfParsedTfOperation> result;
for (auto&& node : nodes)
{
auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
if (it == m_ParsedTfOperations.end())
{
- throw ParseException("Node with name '" + node.m_IndexedValue->name() + "' has not been parsed");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Node with name '%1%' has not been parsed %2%")
+ % node.m_IndexedValue->name()
+ % CHECK_LOCATION().AsString()));
}
ParsedTfOperation* parsedOp = it->second.get();
// Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
@@ -538,7 +624,8 @@ ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, cons
{
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
- // If one of the inputs is a MatMul and the other is a const, then we handle both nodes together as FullyConnected
+ // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
+ // together as FullyConnected.
if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
@@ -557,7 +644,7 @@ ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, cons
}
else
{
- // Otherwise it's just a regular addition
+ // Otherwise it's just a regular addition.
return AddAdditionLayer(nodeDef);
}
}
@@ -625,8 +712,8 @@ public:
ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
{
// Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
- // Tensorflow weights are [H, W, In, Out]
- // ArmNN weights are [Out, In, H, W]
+ // Tensorflow weights are [H, W, In, Out].
+ // ArmNN weights are [Out, In, H, W].
static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};
const TensorInfo outInfo = swizzleForConvolutionWeights
@@ -635,7 +722,7 @@ public:
outputTensorData.resize(m_TensorInfo.GetNumElements());
- // Copy or swizzle from the permanent storage into the storage the caller provided.
+ // Copies or swizzles from the permanent storage into the storage the caller provided.
if (swizzleForConvolutionWeights)
{
armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
@@ -644,7 +731,7 @@ public:
{
memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
}
- // Update the result to point to the user provided storage
+ // Updates the result to point to the user provided storage.
ConstTensor constTensor(outInfo, outputTensorData);
return constTensor;
}
@@ -656,7 +743,8 @@ private:
TensorInfo m_TensorInfo;
};
-DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType)
+DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
+ const tensorflow::NodeDef& nodeDef)
{
switch (tfDataType)
{
@@ -667,9 +755,13 @@ DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType)
return DataType::Signed32;
break;
default:
- throw ParseException(boost::str(
- boost::format("Unknown DataType %1% for node")
- % tensorflow::DataType_Name(tfDataType)));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unknown DataType %1% for node %2% %3%")
+ % tensorflow::DataType_Name(tfDataType)
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
}
@@ -685,30 +777,30 @@ struct ParseTfTensorValueList
static void ReadData(const void* srcData, unsigned int numSrcElements,
std::vector<int8_t>& dstData, unsigned int numDstElements)
{
- // If there are no entries in the list, perform no action
+ // If there are no entries in the list, perform no action.
if (numSrcElements == 0)
{
return;
}
- // If no size was provided, use the length of the value list
+ // If no size was provided, use the length of the value list.
if (numDstElements == 0)
{
numDstElements = numSrcElements;
}
- // Allocate memory
+ // Allocates memory.
dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
- // Copy the value list entries into the destination
+ // Copies the value list entries into the destination.
std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
if (numDstElements > numSrcElements)
{
- // Use the last element in the list to fill the remaining entries
+ // Uses the last element in the list to fill the remaining entries.
std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
}
}
@@ -792,9 +884,12 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
if (nodeDef.attr().count("value") == 0)
{
- throw ParseException(boost::str(
- boost::format("Value not found for Const node - %1%")
- % nodeDef.name()));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Value not found for Const node - %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
@@ -807,8 +902,8 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
std::back_inserter(dimensionSizes), GetDimensionSize);
- // Calculate number of elements
- const DataType dataType = ConvertTfTensorDataType(tfDataType);
+ // Calculates number of elements.
+ const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
unsigned int numElements = 0U;
if (!dimensionSizes.empty())
@@ -819,53 +914,65 @@ ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, co
std::vector<int8_t> tensorData;
- // Get tensor data from the list of values attribute
+ // Get tensor data from the list of values attribute.
if (tfTensor.tensor_content().empty())
{
InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
// If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
- // tensor of the provided number of elements
+ // tensor of the provided number of elements.
if (numElements == 0)
{
- const unsigned int tfNumElements = static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
+ const unsigned int tfNumElements =
+ static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
dimensionSizes.push_back(tfNumElements);
}
}
- // Get tensor data from tensor content attribute
+ // Gets tensor data from tensor content attribute.
else
{
tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
- // Check if a tensor shape is defined for the tensor content
+ // Checks if a tensor shape is defined for the tensor content.
if (numElements == 0)
{
- throw ParseException(boost::str(
- boost::format("No tensor shape found for Const node - %1%")
- % nodeDef.name()));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "No tensor shape found for Const node - %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
}
- // Const node requires at least a list of values or a content attribute
+ // Const node requires at least a list of values or a content attribute.
if (tensorData.empty())
{
- throw ParseException(boost::str(
- boost::format("No tensor data found for Const node - %1%")
- % nodeDef.name()));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "No tensor data found for Const node - %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
- const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()), dimensionSizes.data(), dataType);
+ const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
+ dimensionSizes.data(),
+ dataType);
// If we have a list of values, then the length of the list must be
- // less than or equal to the number of elements implied by the shape argument
+ // less than or equal to the number of elements implied by the shape argument.
if (tensorData.size() > tensorInfo.GetNumBytes())
{
- throw ParseException(boost::str(
- boost::format("Number of elements (%1%) should be less than or equal \
- to the number of elements implied by the shape argument (%2%) for Const node - %3%")
- % (tensorData.size() / GetDataTypeSize(dataType))
- % tensorInfo.GetNumElements()
- % nodeDef.name()));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Number of elements (%1%) should be less than or equal "
+ "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
+ % (tensorData.size() / GetDataTypeSize(dataType))
+ % tensorInfo.GetNumElements()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
@@ -896,7 +1003,13 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports Convolution layers with constant weights");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
+ % nodeDef.name()
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* weightNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
@@ -905,7 +1018,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
- // read the dilations, if present - only [1,1,1,1] (the default) is supported
+ // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
if (!dilations.empty())
{
@@ -913,7 +1026,12 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
{
if (dilation != 1u)
{
- throw ParseException("ArmNN only supports Convolution layers with dilations [1,1,1,1]");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
}
}
@@ -921,11 +1039,13 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
Convolution2dDescriptor desc;
desc.m_BiasEnabled = false;
+ CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
+
if (dataFormat == "NHWC")
{
desc.m_StrideX = strides[2];
desc.m_StrideY = strides[1];
- // Swizzle input to supported memory layout
+ // Swizzles input to supported memory layout.
inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
}
else if (dataFormat == "NCHW")
@@ -933,10 +1053,6 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
desc.m_StrideX = strides[3];
desc.m_StrideY = strides[2];
}
- else
- {
- throw ParseException("Unsupported data format passed for Conv2D. Only NHWC and NCHW supported");
- }
uint32_t inputHeight = inputTensorInfo.GetShape()[2];
uint32_t inputWidth = inputTensorInfo.GetShape()[3];
@@ -950,6 +1066,9 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
bool padding = false;
TensorInfo outputInfo;
+
+ CHECK_PADDING_TYPE(nodeDef, paddingString);
+
if (paddingString == "SAME")
{
padding = true;
@@ -976,10 +1095,6 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
static_cast<float>(desc.m_StrideX)))
}, DataType::Float32);
}
- else
- {
- throw ParseException("Only 'SAME' and 'VALID' padding supported");
- }
CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
@@ -1000,7 +1115,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
}
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
- const tensorflow::GraphDef& graphDef)
+ const tensorflow::GraphDef& graphDef)
{
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1008,7 +1123,14 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports Depthwise Convolution layers with constant weights");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Depthwise Convolution layer with constant weights. "
+ "Non const input found %1% for node %2% %3%")
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* weightNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
@@ -1021,11 +1143,13 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
DepthwiseConvolution2dDescriptor desc;
desc.m_BiasEnabled = false;
+ CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
+
if (dataFormat == "NHWC")
{
desc.m_StrideX = strides[2];
desc.m_StrideY = strides[1];
- // Swizzle input to supported memory layout
+ // Swizzles input to supported memory layout.
inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
}
else if (dataFormat == "NCHW")
@@ -1033,10 +1157,6 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
desc.m_StrideX = strides[3];
desc.m_StrideY = strides[2];
}
- else
- {
- throw ParseException("Unsupported data format passed for DepthwiseConv2dNative. Only NHWC and NCHW supported");
- }
uint32_t inputHeight = inputTensorInfo.GetShape()[2];
uint32_t inputWidth = inputTensorInfo.GetShape()[3];
@@ -1050,6 +1170,9 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
bool padding = false;
TensorInfo outputInfo;
+
+ CHECK_PADDING_TYPE(nodeDef, paddingString);
+
if (paddingString == "SAME")
{
padding = true;
@@ -1076,10 +1199,6 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
static_cast<float>(desc.m_StrideX)))
}, DataType::Float32);
}
- else
- {
- throw ParseException("Only 'SAME' and 'VALID' padding supported");
- }
CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
@@ -1106,37 +1225,66 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports FusedBatchNormalization layers with constant scale");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports FusedBatchNormalization layers with constant scale. "
+ "Input %1%. Node %2% %3%")
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* scaleNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports FusedBatchNormalization layers with constant offset");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports FusedBatchNormalization layers with constant offset. "
+ "Input %1%. Node %2% %3%")
+ % inputs[2].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* offsetNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports FusedBatchNormalization layers with constant mean");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports FusedBatchNormalization layers with constant mean. "
+ "Input %1%. Node %2% %3%")
+ % inputs[3].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* meanNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports FusedBatchNormalization layers with constant variance");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports FusedBatchNormalization layers with constant variance. "
+ "Input %1%. Node %2% %3%")
+ % inputs[4].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<float>* varianceNode =
boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
- // The descriptor only has the epsilon attribute
+ // The descriptor only has the epsilon attribute.
BatchNormalizationDescriptor desc;
desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
- // data for the parsed tensor args (scale, offset, mean, variance) must be stored locally until the layer is added
+ // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
+ // locally until the layer is added.
std::vector<float> scaleTensorData;
ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);
@@ -1175,11 +1323,108 @@ ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& no
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
+ size_t alphaLayerIndex,
+ const OutputOfParsedTfOperation& otherOp,
+ armnn::IOutputSlot** outputOfLeakyRelu,
+ armnn::ActivationDescriptor & desc)
+{
+ const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
+
+ // Verifying all these assumptions hold:
+ //
+ // 1, the mulNodeDef is an elementwise multiplication node "Mul"
+ // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
+ // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
+ //
+
+ if (mulNodeDef.op() == "Mul")
+ {
+ size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
+
+ BOOST_ASSERT(inputs.size() == 2);
+ BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
+ BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
+ BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
+
+ if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
+ {
+ if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
+ {
+ ParsedConstTfOperation<float>* alpha =
+ boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
+ inputs[alphaLayerIndex].m_IndexedValue);
+
+ std::vector<float> const_data;
+ ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);
+
+ if (const_data.size() == 1)
+ {
+ desc.m_Function = ActivationFunction::LeakyReLu;
+ desc.m_A = const_data[0];
+
+ *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// For max nodes, we only support those as part of a leaky relu, i.e.,
+// as part for a max(mul(a, x), x) expression. We thus need to
+// identify one input as a multiplication with a scalar constant,
+// extract the constant and the two inputs, verify that the two other
+// inputs are the same node, and then create a leaky relu node.
+
+ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
+ const tensorflow::GraphDef& graphDef)
+{
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
+ auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
+ auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
+ IOutputSlot* outputOfLeakyRelu = nullptr;
+
+ ActivationDescriptor desc;
+
+ // There are four possible scenarios we need to support (respectively below):
+ // 1, max(mul(a, x), x)
+ // 2, max(mul(x, a), x)
+ // 3, max(x, mul(a, x))
+ // 4, max(x, mul(x, a))
+
+ if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
+ IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
+ IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
+ IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
+ {
+ BOOST_ASSERT(outputOfLeakyRelu != nullptr);
+
+ IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
+ outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
+ return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+ }
+ else
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN currenly offers limited support for Maximum node when it can be fused to "
+ "form a LeakyRelu activation as leakyrelu=max(mul(alpha, X), X). "
+ "Node: %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+}
+
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
- // In tensorflow, we have the last input of the Concat layer as the axis for concatenation
+ // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
unsigned int numConcatView = numInputs - 1;
@@ -1189,10 +1434,17 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
unsigned int mergeDim = 0;
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
- // The last input is the axis for concatenation
+ // The last input is the axis for concatenation.
if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports Concat with constant axis");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Concat with constant axis. "
+ "Input %1%. Node %2% %3%")
+ % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* shapeNode =
boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
@@ -1200,27 +1452,42 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
std::vector<int32_t> axisTensorData;
ConstTensor axisTensor = shapeNode->GetConstTensor(false, axisTensorData);
- // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW
+ // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
const unsigned int concatDimInput = static_cast<unsigned int>(axisTensorData[0]);
- // Armnn supports concatenation along the channel dimension for data format NHWC and NCHW
+ // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
if (concatDimInput == 0 || concatDimInput == 2)
{
- throw ParseException("The dimension for concatenation is not supported by Armnn");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Dimension %1% for concatenation is not supported by Armnn. "
+ "Node %2% %3%")
+ % concatDimInput
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
- // This is the only concatDim we support in Armnn
+ // This is the only concatDim we support in armnn.
const unsigned int concatDim = 1;
for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
{
- // need to double check whether it should be
+ // Need to double check whether it should be
IOutputSlot& inputSlot =
inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
{
- throw ParseException("The number of dimensions for input tensors of the concatenation op should be 4");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "The number of dimensions: %1% for input tensors of the "
+ "concatenation op should be %2% for Node %3% %4%")
+ % inputTensorInfo.GetNumDimensions()
+ % MaxNumOfTensorDimensions
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
if (concatDimInput == 3)
@@ -1281,16 +1548,22 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- // Note: The Shape layer is handled in a special way, because:
- // 1. ARMNN doesn't support int32 tensors which it outputs
- // 2. ARMNN works with statically shaped tensors which are known at parse time
+ // Note: the Shape layer is handled in a special way, because:
+ // 1. ARMNN doesn't support int32 tensors which it outputs.
+ // 2. ARMNN works with statically shaped tensors which are known at parse time.
// 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
- // tensor which may be used as an input to other ops, most likely a Reshape
+ // tensor which may be used as an input to other ops, most likely a Reshape.
const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
if (tfDataType != tensorflow::DT_INT32)
{
- throw ParseException("Armnn only supports DT_INT32 as out_type");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
+ % tensorflow::DataType_Name(tfDataType)
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -1322,7 +1595,14 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports Reshape layers with constant shapes");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports Reshape layers with constant shapes. "
+ "Input %1% Node %2% %3%")
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* shapeNode =
boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
@@ -1352,22 +1632,35 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
{
- throw ParseException("ArmNN only supports ResizeBilinear layers with constant sizes");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports ResizeBilinear layers with constant sizes. "
+ "Input %1%. Node %2% %3%")
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
ParsedConstTfOperation<int32_t>* sizeNode =
boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
- // Check the align_corners attribute is not set
+ // Checks the align_corners attribute is not set.
if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
{
- throw ParseException("ArmNN only supports ResizeBilinear layers with align_corners set to false");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
+ "Node %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
- // data for the parsed tensor args (size) must be stored locally
+ // Data for the parsed tensor args (size) must be stored locally.
std::vector<int32_t> sizeTensorData;
ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);
- // The descriptor only has target height and width attributes, which we get from the size tensor
+ // The descriptor only has target height and width attributes, which we get from the size tensor.
ResizeBilinearDescriptor desc;
desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
@@ -1376,18 +1669,18 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
- // the input shape is always in BHWC format, this will be swizzled below; for now,
- // get the batch and channels to make up the ArmNN output shape with the target size
+ // The input shape is always in BHWC format, this will be swizzled below; for now,
+ // get the batch and channels to make up the ArmNN output shape with the target size.
unsigned int outBatch = inputTensorInfo.GetShape()[0];
unsigned int outChannels = inputTensorInfo.GetShape()[3];
unsigned int outHeight = desc.m_TargetHeight;
unsigned int outWidth = desc.m_TargetWidth;
TensorShape outShape({outBatch, outChannels, outHeight, outWidth});
- // The output DataType is always Float32, regardless of the input DataType
+ // The output DataType is always Float32, regardless of the input DataType.
const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
- // TensorFlow ResizeBilinear input is always in BHWC format, so add swizzle and deswizzle layers
+ // TensorFlow ResizeBilinear input is always in BHWC format, so add swizzle and deswizzle layers.
layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
@@ -1409,41 +1702,63 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
}
else
{
- throw ParseException(boost::str(
- boost::format("Unsupported DataType %1% for Squeeze operation")
- % tensorflow::DataType_Name(tfDataType)));
+ throw ParseException(
+ boost::str(
+ boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
+ % tensorflow::DataType_Name(tfDataType)
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
+ }
+
+
+ if (inputTensorInfo.GetNumDimensions() > 4)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
+ % inputTensorInfo.GetNumDimensions()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
+ static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
if (squeezeDims.empty())
{
- for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
- {
- if (inputTensorInfo.GetShape()[i] == 1)
- {
- squeezeDims.push_back(i);
- }
- }
+ squeezeDims.assign(dimensionSequence,
+ dimensionSequence+inputTensorInfo.GetNumDimensions());
}
std::vector<uint32_t> outputDims;
for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
{
- bool includeDimension = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
- if (includeDimension)
+ bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
+ auto currentDimension = inputTensorInfo.GetShape()[i];
+ if (skipSqueeze || currentDimension != 1)
{
- outputDims.push_back(inputTensorInfo.GetShape()[i]);
+ outputDims.push_back(currentDimension);
}
}
if (outputDims.size() > 4)
{
- throw ParseException("Unsupported shape for Squeeze");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
+ % outputDims.size()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
- TensorInfo outTensorInfo = TensorInfo(boost::numeric_cast<unsigned int>(outputDims.size()),
- outputDims.data(),
- type);
+ TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
+ outputDims.data());
+
+ TensorInfo outTensorInfo = inputTensorInfo;
+ outTensorInfo.SetShape(outShape);
+ outTensorInfo.SetDataType(type);
return outTensorInfo;
}
@@ -1496,9 +1811,10 @@ ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, cons
}
/// An ParsedTfOperation for a MatMul node.
-/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because MatMul nodes are
-/// often used for the first part of a biased FullyConnected (MatMul followed by Add) and in these cases armnn doesn't
-/// need a separate layer for the MatMul.
+/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
+/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
+/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
+///
class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
@@ -1516,46 +1832,35 @@ public:
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- // Defer the creation of the layer (see ParsedMatMulTfOperation).
+ // Defers the creation of the layer (see ParsedMatMulTfOperation).
return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
}
-ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+/// An ParsedTfOperation for a Mul node.
+/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
+/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
+/// and in these cases armnn doesn't need a separate layer for the Mul.
+///
+class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
{
- boost::ignore_unused(graphDef);
-
- std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
-
- IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
- IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
- IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
-
- auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
- auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
-
- if (input0NumDims < input1NumDims)
+public:
+ ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
+ : DeferredSingleLayerParsedTfOperation(parser, node)
{
- const bool isNHWC = true;
- input0Slot = BroadcastForAddandMul(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
}
- if (input1NumDims < input0NumDims)
+
+ void CreateLayerDeferred() override
{
- const bool isNHWC = true;
- input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
+ BOOST_ASSERT(m_Layer == nullptr);
+ m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
}
+};
- input0Slot->Connect(layer->GetInputSlot(0));
- input1Slot->Connect(layer->GetInputSlot(1));
+ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+{
+ boost::ignore_unused(graphDef);
- if (input0NumDims < input1NumDims)
- {
- layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
- }
- else
- {
- layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
- }
- return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
+ return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
}
ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
@@ -1570,7 +1875,12 @@ ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeD
auto it = m_InputShapes.find(nodeDef.name());
if (it == m_InputShapes.end())
{
- throw ParseException("Missing input shape for Placeholder '" + nodeDef.name() + "'");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Missing input shape for Placeholder '%1%' %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
TensorInfo tensorInfo(it->second, DataType::Float32);
@@ -1691,7 +2001,13 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
if (inputs.size() != 1)
{
- throw ParseException("2D Pooling expects one input!");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "2D Pooling expects one input!. Got %1% for Node %2% %3%")
+ % inputs.size()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
@@ -1704,13 +2020,15 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
+ CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
+
if (dataFormat == "NHWC")
{
pooling2dDescriptor.m_StrideX = strides[2];
pooling2dDescriptor.m_StrideY = strides[1];
pooling2dDescriptor.m_PoolWidth = ksize[2];
pooling2dDescriptor.m_PoolHeight = ksize[1];
- // Swizzle input to supported memory layout
+ // Swizzles input to supported memory layout.
inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
}
else if (dataFormat == "NCHW")
@@ -1720,16 +2038,15 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
pooling2dDescriptor.m_PoolWidth = ksize[3];
pooling2dDescriptor.m_PoolHeight = ksize[2];
}
- else
- {
- throw ParseException("Only NHWC or NCHW supported for Pooling2d");
- }
uint32_t inputHeight = inputTensorInfo.GetShape()[2];
uint32_t inputWidth = inputTensorInfo.GetShape()[3];
bool padding = false;
TensorInfo outputInfo;
+
+ CHECK_PADDING_TYPE(nodeDef, paddingString);
+
if (paddingString == "SAME")
{
padding = true;
@@ -1756,10 +2073,6 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
static_cast<float>(pooling2dDescriptor.m_StrideX)))
}, DataType::Float32);
}
- else
- {
- throw ParseException("Only 'SAME' and 'VALID' padding supported");
- }
CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
@@ -1770,7 +2083,12 @@ ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef
IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
if (layer == nullptr)
{
- throw ParseException("Failed to add pooling2d layer");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Failed to add pooling2d layer for %1% %2%")
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
@@ -1803,19 +2121,21 @@ ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeD
// with the same data in the correct dimension for broadcast in addition.
if(input1Info.GetNumDimensions() != 1)
{
- throw ParseException("Unsupported bias for BiasAdd. It should be a 1D vector.");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unsupported bias for BiasAdd. It should be a 1D vector. "
+ "Got %1% dimensions for input %2%. Node %3% %4%")
+ % input1Info.GetNumDimensions()
+ % inputs[1].m_IndexedValue->GetNode().name()
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
- const bool isNHWC = (dataFormat == "NHWC");
- const bool isNCHW = (dataFormat == "NCHW");
-
- if (!isNHWC && ! isNCHW)
- {
- throw ParseException("Only NHWC or NCHW supported for BiasAdd");
- }
- input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
+ CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
+ input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
}
else
{
@@ -1849,15 +2169,52 @@ ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeD
return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
+IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
+{
+ std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
+
+ IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
+ IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
+ IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
+
+ auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
+ auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
+
+ if (input0NumDims < input1NumDims)
+ {
+ const bool isNHWC = true;
+ input0Slot = BroadcastForAddandMul(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
+ }
+ if (input1NumDims < input0NumDims)
+ {
+ const bool isNHWC = true;
+ input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
+ }
+
+ input0Slot->Connect(layer->GetInputSlot(0));
+ input1Slot->Connect(layer->GetInputSlot(1));
+
+ if (input0NumDims < input1NumDims)
+ {
+ layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
+ }
+ else
+ {
+ layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
+ }
+ return layer;
+}
+
+
IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
{
- // find bias const (if applicable)
+ // Finds bias const (if applicable).
ParsedConstTfOperation<float>* biasNode = nullptr;
if (addNodeDef != nullptr)
{
std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
- // find our inputs
+ // Finds our inputs.
if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
{
biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
@@ -1868,11 +2225,20 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
}
else
{
- throw ParseException("ArmNN only supports fully connected layers with constant bias");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports fully connected layers with constant bias. "
+ "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
+ % addInputs[0].m_IndexedValue->GetNode().name()
+ % addInputs[1].m_IndexedValue->GetNode().name()
+ % addNodeDef->name()
+ % matMulNodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
}
- // find matmul inputs
+ // Finds matmul inputs.
ParsedConstTfOperation<float>* weightNode = nullptr;
ParsedTfOperation* inputNode = nullptr;
unsigned int inputIdx = 0;
@@ -1891,18 +2257,26 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
}
else
{
- throw ParseException("ArmNN only supports fully connected layers with constant weights");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "ArmNN only supports fully connected layers with constant weights. "
+ "Inputs %1% and %2%. MatMulNode %3% %4%")
+ % mulInputs[0].m_IndexedValue->GetNode().name()
+ % mulInputs[1].m_IndexedValue->GetNode().name()
+ % matMulNodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
std::vector<float> weightTensorData;
- // handle weight
+ // Handles weight.
ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);
FullyConnectedDescriptor desc;
desc.m_BiasEnabled = addNodeDef != nullptr;
IConnectableLayer* layer = nullptr;
- // make the layer
+ // Makes the layer.
if (addNodeDef != nullptr)
{
std::vector<float> biasTensorData;
@@ -1910,7 +2284,14 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
if (weights.GetShape()[1] != biases.GetShape()[0])
{
- throw ParseException("shape of matmul and bias do not match");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Shape of matmul weights and bias do not match. "
+ "AddNode %1%. MatMulNode %2% %3%")
+ % addNodeDef->name()
+ % matMulNodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
@@ -1925,7 +2306,7 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
- // handle output
+ // Handles output.
TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
return layer;
@@ -1933,7 +2314,7 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- // get the type of the node (assume float)
+ // Gets the type of the node (assume float).
tensorflow::DataType type = tensorflow::DT_FLOAT;
if (nodeDef.attr().count("T") != 0)
{
@@ -1948,7 +2329,14 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
{
- throw ParseException("Currently only FLOAT is supported for tensorflow nodes (apart from Const)");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
+ "Got %1% for Node %2% %3%")
+ % tensorflow::DataType_Name(type)
+ % nodeDef.name()
+ % CHECK_LOCATION().AsString()));
}
const std::string& operation = nodeDef.op();
@@ -1959,7 +2347,7 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
- // Store the parsed operation so that dependent layers can connect to it
+ // Stores the parsed operation so that dependent layers can connect to it.
auto it = m_ParsedTfOperations.find(nodeDef.name());
if (it != m_ParsedTfOperations.end())
{
@@ -1967,7 +2355,7 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
}
m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
- // If this node was requested as an output from the network then add an ArmNN output layer
+ // If this node was requested as an output from the network, then adds an ArmNN output layer.
if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
m_RequestedOutputs.end())
{
@@ -1986,14 +2374,18 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
}
else
{
- throw ParseException(boost::str(
- boost::format("Unsupported operation %1% in tensorflow::GraphDef") % operation));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Unsupported operation %1% in tensorflow::GraphDef %2%")
+ % operation
+ % CHECK_LOCATION().AsString()));
}
}
void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
{
- // add all nodes to our map
+ // Adds all nodes to our map.
m_NodesByName.clear();
m_NetworkInputsBindingInfo.clear();
m_NetworkOutputsBindingInfo.clear();
@@ -2004,19 +2396,24 @@ void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
m_NodesByName[node.name()] = &node;
}
- // Find the output nodes the user requested
+ // Finds the output nodes the user requested.
std::vector<const tensorflow::NodeDef*> targetNodes;
for (const std::string& requestedOutputName : m_RequestedOutputs)
{
auto nodeIt = m_NodesByName.find(requestedOutputName);
if (nodeIt == m_NodesByName.end())
{
- throw ParseException("Couldn't find requested output node '" + requestedOutputName + "' in graph");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Couldn't find requested output node '%1%' in graph %2%")
+ % requestedOutputName
+ % CHECK_LOCATION().AsString()));
}
targetNodes.push_back(nodeIt->second);
}
- // Sort them into a linear ordering such that all inputs of a node are before the node itself
+ // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
std::vector<const tensorflow::NodeDef*> sortedNodes;
if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
targetNodes,
@@ -2031,10 +2428,14 @@ void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
},
sortedNodes))
{
- throw ParseException("Cycle detected in graph");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Cycle detected in graph %1%")
+ % CHECK_LOCATION().AsString()));
}
- // Parse each node in order, knowing that all inputs of a node will be processed before the node itself
+ // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
for (const auto& it : sortedNodes)
{
const tensorflow::NodeDef& currentNode = *it;
@@ -2050,12 +2451,15 @@ INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
if (fd == nullptr)
{
- std::stringstream error;
- error << "Graph file " << graphFile << " failed to open";
- throw FileNotFoundException(error.str());
+ throw FileNotFoundException(
+ boost::str(
+ boost::format(
+ "Graph file %1% failed to open %2%")
+ % graphFile
+ % CHECK_LOCATION().AsString()));
}
- // Parse the file into a message
+ // Parses the file into a message.
tensorflow::GraphDef graphDef;
auto input = new google::protobuf::io::FileInputStream(fileno(fd));
bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
@@ -2064,9 +2468,11 @@ INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
if (!success)
{
- std::stringstream error;
- error << "Failed to parse graph file";
- throw ParseException(error.str());
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Failed to parse graph file %1%")
+ % CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -2076,15 +2482,17 @@ INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
const std::map<std::string, TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs)
{
- // Parse the string into a message
+ // Parses the string into a message.
tensorflow::GraphDef graphDef;
bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
if (!success)
{
- std::stringstream error;
- error << "Failed to parse graph file";
- throw ParseException(error.str());
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Failed to parse graph file %1%")
+ % CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -2098,12 +2506,15 @@ INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (fd == nullptr)
{
- std::stringstream error;
- error << "Graph file " << graphFile << " failed to open";
- throw FileNotFoundException(error.str());
+ throw FileNotFoundException(
+ boost::str(
+ boost::format(
+ "Graph file %1% failed to open %2%")
+ % graphFile
+ % CHECK_LOCATION().AsString()));
}
- // Parse the file into a message
+ // Parses the file into a message.
tensorflow::GraphDef graphDef;
google::protobuf::io::FileInputStream inStream(fileno(fd));
@@ -2114,9 +2525,12 @@ INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
if (!success)
{
- std::stringstream error;
- error << "Failed to parse protobuf file" << graphFile;
- throw ParseException(error.str());
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Failed to parse protobuf file %1% %2%")
+ % graphFile
+ % CHECK_LOCATION().AsString()));
}
return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
@@ -2131,7 +2545,11 @@ INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& grap
m_InputShapes = inputShapes;
if (requestedOutputs.size() == 0)
{
- throw ParseException("requestedOutputs must have at least one entry");
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "requestedOutputs must have at least one entry %1%")
+ % CHECK_LOCATION().AsString()));
}
m_RequestedOutputs = requestedOutputs;
@@ -2152,7 +2570,7 @@ INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& grap
void TfParser::Cleanup()
{
- // cleanup, in case we reuse this parser
+ // Cleanup, in case we reuse this parser.
m_InputShapes.clear();
m_RequestedOutputs.clear();
m_NodesByName.clear();
@@ -2176,7 +2594,13 @@ std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string
auto it = nameToBindingInfo.find(layerName);
if (it == nameToBindingInfo.end())
{
- throw InvalidArgumentException(boost::str(boost::format("Unknown %1% '%2%'") % bindingPointDesc % layerName));
+ throw InvalidArgumentException(
+ boost::str(
+ boost::format(
+ "Unknown %1% '%2%' %3%")
+ % bindingPointDesc
+ % layerName
+ % CHECK_LOCATION().AsString()));
}
return it->second;
}
@@ -2205,8 +2629,13 @@ void TfParser::TrackBindingPoint(IConnectableLayer* layer,
}
else
{
- throw ParseException(boost::str(
- boost::format("Id %1% used by more than one %2% layer") % id % bindingPointDesc));
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Id %1% used by more than one %2% layer %3%")
+ % id
+ % bindingPointDesc
+ % CHECK_LOCATION().AsString()));
}
}
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index c5b4bce8ac..75cd3a5bd0 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -36,9 +36,9 @@ using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>;
///
/// WithOutputTensorIndex wraps a value and an index. The purpose of
-/// this template is to signify that in Tensorflow the input name of
-/// a layer has the convention of 'inputTensorName:#index' where the
-/// #index can be omitted and it implicitly means the 0. output of
+/// this template is to signify that, in Tensorflow, the input name of
+/// a layer has the convention of 'inputTensorName:#index', where the
+/// #index can be omitted and it implicitly means the 0th output of
/// the referenced layer. By supporting this notation we can handle
/// layers with multiple outputs, such as Split.
///
@@ -64,28 +64,28 @@ using OutputId = WithOutputTensorIndex<std::string>;
class TfParser : public ITfParser
{
public:
- /// Create the network from a protobuf text file on disk
+ /// Creates the network from a protobuf text file on the disk.
virtual armnn::INetworkPtr CreateNetworkFromTextFile(
const char* graphFile,
const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs) override;
- /// Create the network from a protobuf binary file on disk
+ /// Creates the network from a protobuf binary file on the disk.
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
const char* graphFile,
const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs) override;
- /// Create the network directly from protobuf text in a string. Useful for debugging/testing
+ /// Creates the network directly from protobuf text in a string. Useful for debugging/testing.
virtual armnn::INetworkPtr CreateNetworkFromString(
const char* protoText,
const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs) override;
- /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
+ /// Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.
virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override;
- /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
+ /// Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.
virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override;
public:
@@ -95,19 +95,20 @@ private:
template <typename T>
friend class ParsedConstTfOperation;
friend class ParsedMatMulTfOperation;
+ friend class ParsedMulTfOperation;
- /// Parses a GraphDef loaded into memory from one of the other CreateNetwork*
+ /// Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs);
- /// sets up variables and then performs BFS to parse all nodes
+ /// Sets up variables and then performs BFS to parse all nodes.
void LoadGraphDef(const tensorflow::GraphDef& graphDef);
- /// parses a given node, assuming nodes before it in graph have been done
+ /// Parses a given node, assuming nodes before it in the graph have been done.
void LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
- /// Handling identity layers as the input for Conv2D layer
+ /// Handling identity layers as the input for Conv2D layer.
const tensorflow::NodeDef* ResolveIdentityNode(const tensorflow::NodeDef* nodeDef);
/// Finds the nodes connected as inputs of the given node in the graph.
std::vector<OutputOfConstNodeDef> GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const;
@@ -120,7 +121,7 @@ private:
ParsedTfOperationPtr ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
- /// Checks if there is a pre-parsed const tensor is available with the given name and Type
+ /// Checks if there is a pre-parsed const tensor available with the given name and Type.
template<typename Type>
bool HasParsedConstTensor(const std::string & nodeName) const;
@@ -149,11 +150,22 @@ private:
ParsedTfOperationPtr ParseAvgPool(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr ParsePooling2d(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef,
armnn::PoolingAlgorithm pooltype);
+ ParsedTfOperationPtr ParseMaximum(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef);
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef& nodeDef, armnn::ActivationDescriptor& desc);
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd = false);
+
+private:
+ armnn::IConnectableLayer* AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef);
+
armnn::IConnectableLayer* AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName);
+ bool IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
+ size_t alphaLayerIndex,
+ const OutputOfParsedTfOperation& otherOp,
+ armnn::IOutputSlot** outputOfLeakyRelu,
+ armnn::ActivationDescriptor & desc);
+
static std::pair<armnn::LayerBindingId, armnn::TensorInfo> GetBindingInfo(const std::string& layerName,
const char* bindingPointDesc,
const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo);
@@ -173,27 +185,27 @@ private:
void Cleanup();
- /// The network we're building. Gets cleared after it is passed to the user
+ /// The network we're building. Gets cleared after it is passed to the user.
armnn::INetworkPtr m_Network;
using OperationParsingFunction = ParsedTfOperationPtr(TfParser::*)(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef);
- /// map of TensorFlow operation names to parsing member functions
+ /// Map of TensorFlow operation names to parsing member functions.
static const std::map<std::string, OperationParsingFunction> ms_OperationNameToParsingFunctions;
std::map<std::string, armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_RequestedOutputs;
- /// map of nodes extracted from the GraphDef to speed up parsing
+ /// Map of nodes extracted from the GraphDef to speed up parsing.
std::unordered_map<std::string, const tensorflow::NodeDef*> m_NodesByName;
std::unordered_map<std::string, ParsedTfOperationPtr> m_ParsedTfOperations;
- /// maps input layer names to their corresponding ids and tensor infos
+ /// Maps input layer names to their corresponding ids and tensor info.
std::unordered_map<std::string, BindingPointInfo> m_NetworkInputsBindingInfo;
- /// maps output layer names to their corresponding ids and tensor infos
+ /// Maps output layer names to their corresponding ids and tensor info.
std::unordered_map<std::string, BindingPointInfo> m_NetworkOutputsBindingInfo;
};
}
diff --git a/src/armnnTfParser/test/Activations.cpp b/src/armnnTfParser/test/Activations.cpp
index 72ed64d653..595fce768e 100644
--- a/src/armnnTfParser/test/Activations.cpp
+++ b/src/armnnTfParser/test/Activations.cpp
@@ -9,8 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-
-struct ActivationFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ActivationFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit ActivationFixture(const char* activationFunction)
{
@@ -107,7 +106,4 @@ BOOST_FIXTURE_TEST_CASE(ParseTanh, TanhFixture)
{ -0.09966799f, -0.19737528f, -0.29131261f, -0.379949f, 0.09966799f, 0.19737528f, 0.29131261f });
}
-
-
-
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Addition.cpp b/src/armnnTfParser/test/Addition.cpp
index c9e69268c6..c642b5a45a 100644
--- a/src/armnnTfParser/test/Addition.cpp
+++ b/src/armnnTfParser/test/Addition.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct AdditionFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct AdditionFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
AdditionFixture()
{
diff --git a/src/armnnTfParser/test/BiasAdd.cpp b/src/armnnTfParser/test/BiasAdd.cpp
index e29aeb1057..1e9911d717 100644
--- a/src/armnnTfParser/test/BiasAdd.cpp
+++ b/src/armnnTfParser/test/BiasAdd.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct BiasAddFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct BiasAddFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit BiasAddFixture(const std::string& dataFormat)
{
diff --git a/src/armnnTfParser/test/BroadcastForAdd.cpp b/src/armnnTfParser/test/BroadcastForAdd.cpp
index 4c9731d7fc..aab6dbfd79 100644
--- a/src/armnnTfParser/test/BroadcastForAdd.cpp
+++ b/src/armnnTfParser/test/BroadcastForAdd.cpp
@@ -6,10 +6,10 @@
#include <boost/test/unit_test.hpp>
#include "armnnTfParser/ITfParser.hpp"
#include "ParserPrototxtFixture.hpp"
-// This is a special case for add, which supports broadcasting
+// This is a special case for add, which supports broadcasting.
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct BroadcastForAddFixtureSlot1 : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct BroadcastForAddFixtureSlot1 : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
BroadcastForAddFixtureSlot1()
{
@@ -71,7 +71,7 @@ struct BroadcastForAddFixtureSlot1 : public ParserPrototxtFixture<armnnTfParser:
}
};
-struct BroadcastForAddFixtureSlot0 : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct BroadcastForAddFixtureSlot0 : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
BroadcastForAddFixtureSlot0()
{
diff --git a/src/armnnTfParser/test/Concat.cpp b/src/armnnTfParser/test/Concat.cpp
index a7d5ea03af..3e39bef2e7 100644
--- a/src/armnnTfParser/test/Concat.cpp
+++ b/src/armnnTfParser/test/Concat.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct ConcatFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConcatFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit ConcatFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1,
unsigned int concatDim)
diff --git a/src/armnnTfParser/test/ConcatOfConcats.cpp b/src/armnnTfParser/test/ConcatOfConcats.cpp
index 7316b9f1ac..2832159acc 100644
--- a/src/armnnTfParser/test/ConcatOfConcats.cpp
+++ b/src/armnnTfParser/test/ConcatOfConcats.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct ConcatOfConcatsFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConcatOfConcatsFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit ConcatOfConcatsFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1,
const armnn::TensorShape& inputShape2, const armnn::TensorShape& inputShape3,
diff --git a/src/armnnTfParser/test/Constant.cpp b/src/armnnTfParser/test/Constant.cpp
index 09587fc3d5..bc8b36d61b 100644
--- a/src/armnnTfParser/test/Constant.cpp
+++ b/src/armnnTfParser/test/Constant.cpp
@@ -14,13 +14,13 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser)
// Tests that a Const node in Tensorflow can be converted to a ConstLayer in armnn (as opposed to most
// Const nodes which are used as weight inputs for convolutions etc. and are therefore not converted to
// armnn ConstLayers).
-struct ConstantFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConstantFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ConstantFixture()
{
- // input = tf.placeholder(tf.float32, name = "input")
- // const = tf.constant([17], tf.float32, [1])
- // output = tf.add(input, const, name = "output")
+ // Input = tf.placeholder(tf.float32, name = "input")
+ // Const = tf.constant([17], tf.float32, [1])
+ // Output = tf.add(input, const, name = "output")
m_Prototext =
R"(
node {
@@ -90,12 +90,12 @@ BOOST_FIXTURE_TEST_CASE(Constant, ConstantFixture)
// Tests that a single Const node in Tensorflow can be used twice by a dependant node. This should result in only
// a single armnn ConstLayer being created.
-struct ConstantReusedFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConstantReusedFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ConstantReusedFixture()
{
- // const = tf.constant([17], tf.float32, [1])
- // output = tf.add(const, const, name = "output")
+ // Const = tf.constant([17], tf.float32, [1])
+ // Output = tf.add(const, const, name = "output")
m_Prototext =
R"(
node {
@@ -145,7 +145,7 @@ BOOST_FIXTURE_TEST_CASE(ConstantReused, ConstantReusedFixture)
}
template <int ListSize>
-struct ConstantValueListFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConstantValueListFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ConstantValueListFixture()
{
@@ -180,7 +180,7 @@ node {
m_Prototext += std::string("float_val : ") + std::to_string(value) + "\n";
}
- m_Prototext +=
+ m_Prototext +=
R"(
}
}
@@ -209,7 +209,7 @@ BOOST_FIXTURE_TEST_CASE(ConstantMaxValueList, ConstantMaxValueListFixture)
}
template <bool WithShape, bool WithContent, bool WithValueList>
-struct ConstantCreateFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ConstantCreateFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ConstantCreateFixture()
{
diff --git a/src/armnnTfParser/test/Convolution2d.cpp b/src/armnnTfParser/test/Convolution2d.cpp
index a7c7648b81..8ad1036ef1 100644
--- a/src/armnnTfParser/test/Convolution2d.cpp
+++ b/src/armnnTfParser/test/Convolution2d.cpp
@@ -11,14 +11,14 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct Convolution2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit Convolution2dFixture(const char* paddingType)
: Convolution2dFixture(paddingType, 1)
{}
- // dilation: 0 - dilations attribute is not included;
- // dilation: >0 - dilations attribute set to [1,v,v,1], where v is the value of the dilation arg
+ // Dilation: 0 - dilations attribute is not included;
+ // Dilation: >0 - dilations attribute set to [1,v,v,1], where v is the value of the dilation arg
explicit Convolution2dFixture(const char* paddingType, int stride, int dilation = 0)
{
std::string strideString = std::to_string(stride);
@@ -309,13 +309,8 @@ BOOST_AUTO_TEST_CASE(ParseConv2DDilation2)
armnn::TensorShape tensorShape = { 1, 3, 3, 1 };
inputShapes["graphInput"] = tensorShape;
armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
- BOOST_CHECK_EXCEPTION(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }),
- armnn::ParseException,
- [] (armnn::ParseException const& ex)->bool
- {
- return strcmp(ex.what(),
- "ArmNN only supports Convolution layers with dilations [1,1,1,1]") == 0;
- });
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }),
+ armnn::ParseException);
}
diff --git a/src/armnnTfParser/test/DepthwiseConvolution2d.cpp b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp
index 84e7a7e7a9..a44f94957b 100644
--- a/src/armnnTfParser/test/DepthwiseConvolution2d.cpp
+++ b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp
@@ -11,7 +11,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct DepthwiseConvolution2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct DepthwiseConvolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit DepthwiseConvolution2dFixture(const char* paddingType)
{
diff --git a/src/armnnTfParser/test/FullyConnected.cpp b/src/armnnTfParser/test/FullyConnected.cpp
index 2a7b4951b7..e7f040e784 100644
--- a/src/armnnTfParser/test/FullyConnected.cpp
+++ b/src/armnnTfParser/test/FullyConnected.cpp
@@ -14,15 +14,15 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser)
// In Tensorflow fully connected layers are expressed as a MatMul followed by an Add.
// The TfParser must detect this case and convert them to a FullyConnected layer.
-struct FullyConnectedFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct FullyConnectedFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
FullyConnectedFixture()
{
- // input = tf.placeholder(tf.float32, [1, 1], "input")
- // weights = tf.constant([2], tf.float32, [1, 1])
- // matmul = tf.matmul(input, weights)
- // bias = tf.constant([1], tf.float32)
- // output = tf.add(matmul, bias, name="output")
+ // Input = tf.placeholder(tf.float32, [1, 1], "input")
+ // Weights = tf.constant([2], tf.float32, [1, 1])
+ // Matmul = tf.matmul(input, weights)
+ // Bias = tf.constant([1], tf.float32)
+ // Output = tf.add(matmul, bias, name="output")
m_Prototext = R"(
node {
name: "input"
@@ -153,7 +153,7 @@ BOOST_FIXTURE_TEST_CASE(FullyConnected, FullyConnectedFixture)
// C-- A A -- C
// \ /
// A
-struct MatMulUsedInTwoFcFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MatMulUsedInTwoFcFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MatMulUsedInTwoFcFixture()
{
@@ -326,7 +326,7 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture)
RunTest<1>({ 3 }, { 32 });
// Ideally we would check here that the armnn network has 5 layers:
// Input, 2 x FullyConnected (biased), Add and Output.
- // This would make sure the parser hasn't incorrectly added some unconnected layers corresponding to the MatMul
+ // This would make sure the parser hasn't incorrectly added some unconnected layers corresponding to the MatMul.
}
// Similar to MatMulUsedInTwoFc, but this time the Adds are 'staggered' (see diagram), which means that only one
@@ -338,16 +338,16 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture)
// C2 -- A |
// \ /
// A
-struct MatMulUsedInTwoFcStaggeredFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MatMulUsedInTwoFcStaggeredFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MatMulUsedInTwoFcStaggeredFixture()
{
- // input = tf.placeholder(tf.float32, shape=[1,1], name = "input")
- // const1 = tf.constant([17], tf.float32, [1,1])
- // mul = tf.matmul(input, const1)
- // const2 = tf.constant([7], tf.float32, [1])
- // fc = tf.add(mul, const2)
- // output = tf.add(mul, fc, name="output")
+ // Input = tf.placeholder(tf.float32, shape=[1,1], name = "input")
+ // Const1 = tf.constant([17], tf.float32, [1,1])
+ // Mul = tf.matmul(input, const1)
+ // Const2 = tf.constant([7], tf.float32, [1])
+ // Fc = tf.add(mul, const2)
+ // Output = tf.add(mul, fc, name="output")
m_Prototext = R"(
node {
name: "input"
@@ -484,13 +484,13 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFcStaggered, MatMulUsedInTwoFcStaggeredFi
}
// A MatMul in isolation, not connected to an add. Should result in a non-biased FullyConnected layer.
-struct MatMulFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MatMulFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MatMulFixture()
{
- // input = tf.placeholder(tf.float32, shape = [1, 1], name = "input")
- // const = tf.constant([17], tf.float32, [1, 1])
- // output = tf.matmul(input, const, name = "output")
+ // Input = tf.placeholder(tf.float32, shape = [1, 1], name = "input")
+ // Const = tf.constant([17], tf.float32, [1, 1])
+ // Output = tf.matmul(input, const, name = "output")
m_Prototext = R"(
node {
name: "input"
diff --git a/src/armnnTfParser/test/FusedBatchNorm.cpp b/src/armnnTfParser/test/FusedBatchNorm.cpp
index 632d5f01f9..69f018f194 100644
--- a/src/armnnTfParser/test/FusedBatchNorm.cpp
+++ b/src/armnnTfParser/test/FusedBatchNorm.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct FusedBatchNormFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct FusedBatchNormFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
FusedBatchNormFixture()
{
@@ -166,10 +166,10 @@ struct FusedBatchNormFixture : public ParserPrototxtFixture<armnnTfParser::ITfPa
BOOST_FIXTURE_TEST_CASE(ParseFusedBatchNorm, FusedBatchNormFixture)
{
- RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, // input data
+ RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, // Input data.
{-2.8277204f, -2.12079024f, -1.4138602f,
-0.7069301f, 0.0f, 0.7069301f,
- 1.4138602f, 2.12079024f, 2.8277204f}); // expected output data
+ 1.4138602f, 2.12079024f, 2.8277204f}); // Expected output data.
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Identity.cpp b/src/armnnTfParser/test/Identity.cpp
index ca20de5760..9baa8988f3 100644
--- a/src/armnnTfParser/test/Identity.cpp
+++ b/src/armnnTfParser/test/Identity.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct IdentitySimpleFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct IdentitySimpleFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
IdentitySimpleFixture()
{
@@ -51,7 +51,7 @@ BOOST_FIXTURE_TEST_CASE(IdentitySimple, IdentitySimpleFixture)
RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 1.0f, 2.0f, 3.0f, 4.0f });
}
-struct IdentityFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct IdentityFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
IdentityFixture()
{
@@ -105,7 +105,7 @@ BOOST_FIXTURE_TEST_CASE(ParseIdentity, IdentityFixture)
RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 2.0f, 4.0f, 6.0f, 8.0f });
}
-struct IdentityChainFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct IdentityChainFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
IdentityChainFixture()
{
diff --git a/src/armnnTfParser/test/LocalResponseNormalization.cpp b/src/armnnTfParser/test/LocalResponseNormalization.cpp
index a7c2bfe3e1..dcfbbb6918 100644
--- a/src/armnnTfParser/test/LocalResponseNormalization.cpp
+++ b/src/armnnTfParser/test/LocalResponseNormalization.cpp
@@ -9,8 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-
-struct LocalResponseNormalizationBaseFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct LocalResponseNormalizationBaseFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit LocalResponseNormalizationBaseFixture(float alpha, float beta, float bias)
{
diff --git a/src/armnnTfParser/test/MaximumForLeakyRelu.cpp b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp
new file mode 100644
index 0000000000..a2566fced5
--- /dev/null
+++ b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp
@@ -0,0 +1,169 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct UnsupportedMaximumFixture
+ : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ UnsupportedMaximumFixture()
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "Maximum"
+ op: "Maximum"
+ input: "graphInput"
+ input: "graphInput"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedMaximum, UnsupportedMaximumFixture)
+{
+ BOOST_CHECK_THROW(
+ SetupSingleInputSingleOutput({ 1, 1 }, "graphInput", "Maximum"),
+ armnn::ParseException);
+}
+
+struct SupportedMaximumFixture
+ : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ SupportedMaximumFixture(const std::string & maxInput0,
+ const std::string & maxInput1,
+ const std::string & mulInput0,
+ const std::string & mulInput1)
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value { type: DT_FLOAT }
+ }
+ attr {
+ key: "shape"
+ value { shape { } }
+ }
+ }
+ node {
+ name: "Alpha"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value { type: DT_FLOAT }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim { size: 1 }
+ }
+ float_val: 0.1
+ }
+ }
+ }
+ }
+ node {
+ name: "Mul"
+ op: "Mul"
+ input: ")" + mulInput0 + R"("
+ input: ")" + mulInput1 + R"("
+ attr {
+ key: "T"
+ value { type: DT_FLOAT }
+ }
+ }
+ node {
+ name: "Maximum"
+ op: "Maximum"
+ input: ")" + maxInput0 + R"("
+ input: ")" + maxInput1 + R"("
+ attr {
+ key: "T"
+ value { type: DT_FLOAT }
+ }
+ }
+ )";
+ SetupSingleInputSingleOutput({ 1, 2 }, "graphInput", "Maximum");
+ }
+};
+
+struct LeakyRelu_Max_MulAT_T_Fixture : public SupportedMaximumFixture
+{
+ LeakyRelu_Max_MulAT_T_Fixture()
+ : SupportedMaximumFixture("Mul","graphInput","Alpha","graphInput") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_MulAT_T, LeakyRelu_Max_MulAT_T_Fixture)
+{
+ RunTest<2>(std::vector<float>({-5.0, 3.0}), {-0.5, 3.0});
+}
+
+struct LeakyRelu_Max_T_MulAT_Fixture : public SupportedMaximumFixture
+{
+ LeakyRelu_Max_T_MulAT_Fixture()
+ : SupportedMaximumFixture("graphInput","Mul","Alpha","graphInput") {}
+};
+
+
+BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_T_MulAT, LeakyRelu_Max_T_MulAT_Fixture)
+{
+ RunTest<2>(std::vector<float>({-10.0, 3.0}), {-1.0, 3.0});
+}
+
+struct LeakyRelu_Max_MulTA_T_Fixture : public SupportedMaximumFixture
+{
+ LeakyRelu_Max_MulTA_T_Fixture()
+ : SupportedMaximumFixture("Mul", "graphInput","graphInput","Alpha") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_MulTA_T, LeakyRelu_Max_MulTA_T_Fixture)
+{
+ RunTest<2>(std::vector<float>({-5.0, 3.0}), {-0.5, 3.0});
+}
+
+struct LeakyRelu_Max_T_MulTA_Fixture : public SupportedMaximumFixture
+{
+ LeakyRelu_Max_T_MulTA_Fixture()
+ : SupportedMaximumFixture("graphInput", "Mul", "graphInput", "Alpha") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_T_MulTA, LeakyRelu_Max_T_MulTA_Fixture)
+{
+ RunTest<2>(std::vector<float>({-10.0, 13.0}), {-1.0, 13.0});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/MultiOutput.cpp b/src/armnnTfParser/test/MultiOutput.cpp
index 56be33dab7..7a163ef582 100644
--- a/src/armnnTfParser/test/MultiOutput.cpp
+++ b/src/armnnTfParser/test/MultiOutput.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct MultiOutMatchFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiOutMatchFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiOutMatchFixture()
{
@@ -54,7 +54,7 @@ BOOST_FIXTURE_TEST_CASE(MultiOutMatch, MultiOutMatchFixture)
RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 });
}
-struct MultiOutFailFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiOutFailFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiOutFailFixture()
{
@@ -97,7 +97,7 @@ BOOST_FIXTURE_TEST_CASE(MultiOutFail, MultiOutFailFixture)
// Not running the graph because this is expected to throw an exception during parsing.
}
-struct MultiOutInvalidFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiOutInvalidFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiOutInvalidFixture()
{
diff --git a/src/armnnTfParser/test/Multiplication.cpp b/src/armnnTfParser/test/Multiplication.cpp
index 3a20fd1141..ca9c416ca5 100644
--- a/src/armnnTfParser/test/Multiplication.cpp
+++ b/src/armnnTfParser/test/Multiplication.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct MultiplicationFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiplicationFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiplicationFixture()
{
@@ -74,7 +74,7 @@ BOOST_FIXTURE_TEST_CASE(ParseMultiplication, MultiplicationFixture)
RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 });
}
-struct MultiplicationBroadcastFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiplicationBroadcastFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiplicationBroadcastFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1)
{
diff --git a/src/armnnTfParser/test/PassThru.cpp b/src/armnnTfParser/test/PassThru.cpp
index 8462ec27cc..bba9ea579b 100644
--- a/src/armnnTfParser/test/PassThru.cpp
+++ b/src/armnnTfParser/test/PassThru.cpp
@@ -8,7 +8,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct PassThruFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct PassThruFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
PassThruFixture()
{
@@ -46,7 +46,7 @@ BOOST_FIXTURE_TEST_CASE(RunGraph, PassThruFixture)
auto input = MakeRandomTensor<float, 2>(inputTensorInfo, 378346);
std::vector<float> inputVec;
inputVec.assign(input.data(), input.data() + input.num_elements());
- RunTest<2>(inputVec, inputVec); // The passthru network should output the same as the input
+ RunTest<2>(inputVec, inputVec); // The passthru network should output the same as the input.
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Pooling.cpp b/src/armnnTfParser/test/Pooling.cpp
index 36ffa47def..f603b22afd 100644
--- a/src/armnnTfParser/test/Pooling.cpp
+++ b/src/armnnTfParser/test/Pooling.cpp
@@ -9,8 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-
-struct Pooling2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct Pooling2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
explicit Pooling2dFixture(const char* poolingtype)
{
diff --git a/src/armnnTfParser/test/Reshape.cpp b/src/armnnTfParser/test/Reshape.cpp
index 4eb6b12467..2fe84359fa 100644
--- a/src/armnnTfParser/test/Reshape.cpp
+++ b/src/armnnTfParser/test/Reshape.cpp
@@ -9,8 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-
-struct ReshapeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ReshapeFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ReshapeFixture()
{
diff --git a/src/armnnTfParser/test/ResizeBilinear.cpp b/src/armnnTfParser/test/ResizeBilinear.cpp
index 30d898f5bb..2aad0a651d 100644
--- a/src/armnnTfParser/test/ResizeBilinear.cpp
+++ b/src/armnnTfParser/test/ResizeBilinear.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct ResizeBilinearFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ResizeBilinearFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ResizeBilinearFixture()
{
@@ -98,11 +98,11 @@ node {
BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, ResizeBilinearFixture)
{
- RunTest<4>(// input data
+ RunTest<4>(// Input data.
{ 0.0f, 1.0f, 2.0f,
3.0f, 4.0f, 5.0f,
6.0f, 7.0f, 8.0f },
- // expected output data
+ // Expected output data.
{ 0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
diff --git a/src/armnnTfParser/test/Shape.cpp b/src/armnnTfParser/test/Shape.cpp
index 7b414ecfac..959d69bb73 100644
--- a/src/armnnTfParser/test/Shape.cpp
+++ b/src/armnnTfParser/test/Shape.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct ShapeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct ShapeFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
ShapeFixture()
{
@@ -85,9 +85,8 @@ struct ShapeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
BOOST_FIXTURE_TEST_CASE(ParseShape, ShapeFixture)
{
- // Note: the test's output cannot be an int32 const layer, because that cannot exist in the
- // as ARMNN only supports u8 and float layers. For that reason I added a reshape layer
- // which reshapes the input to its original dimensions, which is not changing it.
+ // Note: the test's output cannot be an int32 const layer, because ARMNN only supports u8 and float layers.
+ // For that reason I added a reshape layer which reshapes the input to its original dimensions.
RunTest<2>({ 0.0f, 1.0f, 2.0f, 3.0f }, { 0.0f, 1.0f, 2.0f, 3.0f });
}
diff --git a/src/armnnTfParser/test/Softmax.cpp b/src/armnnTfParser/test/Softmax.cpp
index 1ab28ea3aa..0b55816982 100644
--- a/src/armnnTfParser/test/Softmax.cpp
+++ b/src/armnnTfParser/test/Softmax.cpp
@@ -9,7 +9,7 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct SoftmaxFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct SoftmaxFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
SoftmaxFixture()
{
diff --git a/src/armnnTfParser/test/Squeeze.cpp b/src/armnnTfParser/test/Squeeze.cpp
index d2d7d49494..1722b630ac 100644
--- a/src/armnnTfParser/test/Squeeze.cpp
+++ b/src/armnnTfParser/test/Squeeze.cpp
@@ -9,9 +9,8 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-
template <bool withDimZero, bool withDimOne>
-struct SqueezeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct SqueezeFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
SqueezeFixture()
{
diff --git a/src/armnnTfParser/test/TestDependencies.cpp b/src/armnnTfParser/test/TestDependencies.cpp
index 13ab17c5b6..fa26a1c0e0 100644
--- a/src/armnnTfParser/test/TestDependencies.cpp
+++ b/src/armnnTfParser/test/TestDependencies.cpp
@@ -22,16 +22,16 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser)
// \ R3
// \|
// O
-struct RediscoveredDependenciesFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct RediscoveredDependenciesFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
RediscoveredDependenciesFixture()
{
- // input = tf.placeholder(tf.float32, 1, "input")
- // relu0 = tf.nn.relu(input, "relu0")
- // relu1 = tf.nn.relu(relu0, "relu1")
- // relu2 = tf.nn.relu(relu0, "relu2")
- // relu3 = tf.nn.relu(relu2, "relu3")
- // output = tf.add(relu1, relu3, "output")
+ // Input = tf.placeholder(tf.float32, 1, "input")
+ // Relu0 = tf.nn.relu(input, "relu0")
+ // Relu1 = tf.nn.relu(relu0, "relu1")
+ // Relu2 = tf.nn.relu(relu0, "relu2")
+ // Relu3 = tf.nn.relu(relu2, "relu3")
+ // Output = tf.add(relu1, relu3, "output")
m_Prototext = R"(
node {
name: "input"
@@ -184,12 +184,12 @@ node {
//
BOOST_AUTO_TEST_CASE(ComplexCycle)
{
- // input = tf.placeholder(tf.float32, 1, "input")
- // add2 = tf.nn.relu(input, add1, "add2") // This line won't actually run in TF, because add1 is not yet defined
- // relu1 = tf.nn.relu(relu0, "relu1")
- // relu2 = tf.nn.relu(relu0, "relu2")
- // relu3 = tf.nn.relu(relu2, "relu3")
- // add1 = tf.add(relu1, relu3, "add1")
+ // Input = tf.placeholder(tf.float32, 1, "input")
+ // Add2 = tf.nn.relu(input, add1, "add2") // This line won't actually run in TF, because add1 is not yet defined
+ // Relu1 = tf.nn.relu(relu0, "relu1")
+ // Relu2 = tf.nn.relu(relu0, "relu2")
+ // Relu3 = tf.nn.relu(relu2, "relu3")
+ // Add1 = tf.add(relu1, relu3, "add1")
const char* prototext = R"(
node {
name: "input"
diff --git a/src/armnnTfParser/test/TestMultiInputsOutputs.cpp b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp
index 5eea616ec8..c7889f3966 100644
--- a/src/armnnTfParser/test/TestMultiInputsOutputs.cpp
+++ b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp
@@ -9,14 +9,14 @@
BOOST_AUTO_TEST_SUITE(TensorflowParser)
-struct MultiInputsOutputsFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+struct MultiInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
MultiInputsOutputsFixture()
{
- // input1 = tf.placeholder(tf.float32, shape=[], name = "input1")
- // input2 = tf.placeholder(tf.float32, shape = [], name = "input2")
- // add1 = tf.add(input1, input2, name = "add1")
- // add2 = tf.add(input1, input2, name = "add2")
+ // Input1 = tf.placeholder(tf.float32, shape=[], name = "input1")
+ // Input2 = tf.placeholder(tf.float32, shape = [], name = "input2")
+ // Add1 = tf.add(input1, input2, name = "add1")
+ // Add2 = tf.add(input1, input2, name = "add2")
m_Prototext = R"(
node {
name: "input1"