path: root/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
Diffstat (limited to 'src/armnn/layers/ConvertFp32ToFp16Layer.cpp')
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 7ff98ed898..0a126e2284 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ConvertFp32ToFp16Layer.hpp"
@@ -31,18 +31,18 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
 
 void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
 {
-    IgnoreUnused(shapeInferenceMethod);
     VerifyLayerConnections(1, CHECK_LOCATION());
 
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ConditionalThrowIfNotEqual<LayerValidationException>(
-        "ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
-        GetOutputSlot(0).GetTensorInfo().GetShape(),
-        inferredShapes[0]);
+    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
 }
 
 void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
 {
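
Note: for reference, below is a sketch of how ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs reads once this patch is applied. It is assembled from the hunk above rather than copied from the repository, so blank lines and the added comments are assumptions; the comments describe the intent of the helpers (VerifyShapeInferenceType, ValidateAndCopyShape) as suggested by their names and the ShapeInferenceMethod options (ValidateOnly / InferAndValidate), not their exact implementation.

void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
    // The layer must have exactly one connected input slot.
    VerifyLayerConnections(1, CHECK_LOCATION());

    // Shape currently set on OutputSlot[0]; with deferred shape inference this
    // may still be unspecified at this point.
    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    // Check that the state of the output shape is compatible with the requested
    // ShapeInferenceMethod (assumption: rejects an unspecified shape under ValidateOnly).
    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);

    // FP32 -> FP16 conversion is element-wise, so the output shape equals the input shape.
    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
    ARMNN_ASSERT(inferredShapes.size() == 1);

    // Validate the existing output shape against the inferred one, or copy the
    // inferred shape onto the output slot, depending on shapeInferenceMethod.
    // "LayerName" is the placeholder string used by this patch.
    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
}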