Diffstat (limited to 'src/armnn/layers/StandInLayer.cpp')
-rw-r--r--  src/armnn/layers/StandInLayer.cpp  |  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325caa..d23d1d0bad 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@ StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
 std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    boost::ignore_unused(factory);
+    IgnoreUnused(factory);
     // This throws in the event that it's called. We would expect that any backend that
     // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
     // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@ StandInLayer* StandInLayer::Clone(Graph& graph) const
 std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    boost::ignore_unused(inputShapes);
+    IgnoreUnused(inputShapes);
     throw Exception("Stand in layer does not support infering output shapes");
 }
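
Note: this change replaces Boost's boost::ignore_unused with Arm NN's own IgnoreUnused helper, part of the wider removal of the Boost dependency. Below is a minimal sketch of how such a helper is typically written; the exact implementation and header location in Arm NN are not shown in this diff and are an assumption here.

// Minimal sketch of an IgnoreUnused-style helper (assumption: Arm NN's real
// utility is equivalent in spirit). A variadic function template with an empty
// body marks every argument as "used", so -Wunused-parameter stays quiet, and
// the empty inline body optimizes away to nothing.
namespace armnn
{

template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}

} // namespace armnn

// Usage mirroring this diff:
//     std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
//     {
//         IgnoreUnused(factory); // 'factory' is deliberately unused; this layer always throws
//         ...
//     }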