path: root/src/backends/reference/RefLayerSupport.cpp
author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-03-26 09:20:43 +0000
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-03-26 16:16:55 +0000
commit     57ef0088d20dd708ff92222d244ea02f1e1e5216 (patch)
tree       ae11f55f6bac939a51d5182eae441d322efb3e0e /src/backends/reference/RefLayerSupport.cpp
parent     9272f8b9050096f39796227c5d89ed7b9905146d (diff)
download   armnn-57ef0088d20dd708ff92222d244ea02f1e1e5216.tar.gz
IVGCVSW-4597 Modify BF16 optimizer to Convert only inputs and weights of
Convolution2d and FullyConnected layers

 * Add InsertConvertFp32ToBf16LayersBefore
 * Add ConvertWeight to ConvertFp32NetworkToBf16Impl for Conv2d and FullyConnected
 * Allow different input and output types when the input is BF16 and the
   output is FP32, for Conv2d and FullyConnected layers
 * Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ic8f92ff28edcae08a72a3114a28f50c4619f919b
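In effect, the patch relaxes the input/output type-equality rule for these two layer types in the reference backend: a BFloat16 input may now pair with either a BFloat16 or a Float32 output, while every other input type must still match the output exactly. The following standalone sketch captures that predicate; the enum and function names are illustrative only, not ArmNN API:

    #include <cstdio>

    // Illustrative stand-in for armnn::DataType; not the real enum.
    enum class DataType { Float32, BFloat16, QAsymmU8 };

    // Hypothetical helper mirroring the relaxed check this commit adds for
    // Convolution2d and FullyConnected in the reference backend.
    bool OutputTypeCompatible(DataType input, DataType output)
    {
        if (input == DataType::BFloat16)
        {
            // A BF16 input may produce either BF16 or FP32 output.
            return output == DataType::BFloat16 || output == DataType::Float32;
        }
        // Every other input type must match the output exactly.
        return input == output;
    }

    int main()
    {
        std::printf("%d\n", OutputTypeCompatible(DataType::BFloat16, DataType::Float32)); // 1
        std::printf("%d\n", OutputTypeCompatible(DataType::Float32, DataType::BFloat16)); // 0
    }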
Diffstat (limited to 'src/backends/reference/RefLayerSupport.cpp')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 551a7b5867..7b25a436e9 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -474,8 +474,20 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                   "Reference Convolution2d: output is not a supported type.");
 
-    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
+    if (input.GetDataType() == DataType::BFloat16)
+    {
+        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
+        {
+            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
+            supported = false;
+        }
+    }
+    else
+    {
+        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                   "Reference Convolution2d: input and output types mismatched.");
+    }
 
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
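From the caller's side, the effect of this hunk is that a BFloat16 input paired with a Float32 output now passes the reference backend's Convolution2d support check. A hedged usage sketch, assuming the RefLayerSupport and TensorInfo APIs at this revision; the include path, tensor shapes, and descriptor values are illustrative:

    #include <string>
    #include <iostream>

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include "RefLayerSupport.hpp" // internal header from src/backends/reference/, not public API

    int main()
    {
        using namespace armnn;

        // Shapes are made up purely for illustration.
        TensorInfo input({1, 3, 8, 8}, DataType::BFloat16);
        TensorInfo output({1, 4, 8, 8}, DataType::Float32);   // FP32 output from BF16 input
        TensorInfo weights({4, 3, 3, 3}, DataType::BFloat16); // non-quantized weights match the input type

        Convolution2dDescriptor descriptor;
        descriptor.m_StrideX = 1;
        descriptor.m_StrideY = 1;

        RefLayerSupport layerSupport;
        std::string reason;
        bool supported = layerSupport.IsConvolution2dSupported(input, output, descriptor, weights,
                                                               EmptyOptional(),
                                                               Optional<std::string&>(reason));

        // After this commit, the BF16-in/FP32-out combination is reported as supported.
        std::cout << (supported ? "supported" : reason) << std::endl;
    }

Note that the failing path in the new code appends to reasonIfUnsupported.value() directly, so passing a real string (as above) rather than an empty optional is the safe way to call it.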
@@ -882,12 +894,24 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                   "Reference Fully Connected: output type not supported.");
 
-    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference Fully Connected: input and output types mismatched.");
-
     supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                   "Reference Fully Connected: weights type not supported.");
 
+    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
+    if (input.GetDataType() == DataType::BFloat16)
+    {
+        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
+        {
+            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
+            supported = false;
+        }
+    }
+    else
+    {
+        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                      "Reference Fully Connected: input and output types mismatched.");
+    }
+
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     std::array<DataType, 3> supportedWeightTypes =
     {
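The optimizer-side half of the change (ConvertWeight in ConvertFp32NetworkToBf16Impl, not part of this file) pre-converts FP32 weights to BF16. For background, BFloat16 is simply the top 16 bits of an IEEE-754 float32 (same sign and exponent, 7 mantissa bits). A minimal standalone sketch of that conversion, using truncation for brevity; ArmNN's actual converter may round to nearest rather than truncate:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Convert an IEEE-754 float32 to bfloat16 by keeping the top 16 bits.
    // Production converters typically round to nearest even; truncation is
    // used here only to keep the example short.
    static std::uint16_t Fp32ToBf16Truncate(float value)
    {
        std::uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // type-pun safely
        return static_cast<std::uint16_t>(bits >> 16);
    }

    // Widening back to float32 is exact: place the 16 bits in the high half.
    static float Bf16ToFp32(std::uint16_t bf16)
    {
        std::uint32_t bits = static_cast<std::uint32_t>(bf16) << 16;
        float value;
        std::memcpy(&value, &bits, sizeof(value));
        return value;
    }

    int main()
    {
        const float weight = 1.2345f;
        const std::uint16_t bf16 = Fp32ToBf16Truncate(weight);
        // The round trip loses mantissa precision but keeps the full FP32 range,
        // which is why BF16 weights can feed an FP32-output layer cheaply.
        std::printf("%f -> 0x%04x -> %f\n", weight, bf16, Bf16ToFp32(bf16));
    }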