path: root/src/backends/neon/NeonLayerSupport.cpp
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-03-30 16:11:04 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-03-31 09:29:40 +0100
commit    250d3927b16abe4d6932cd5dce1184bd7026a2b7 (patch)
tree      f73603873c0fbd692fbcbbd242d2a45cef6dc890 /src/backends/neon/NeonLayerSupport.cpp
parent    e2062cdf1eb31b87860f9889f0e799e89f0dfa30 (diff)
download  armnn-250d3927b16abe4d6932cd5dce1184bd7026a2b7.tar.gz
IVGCVSW-4633 Add conversion of BF16 support to Neon
* Add NeonConvertBf16ToFp32Workload
* Add NeonConvertFp32ToBf16Workload
* Add BFloat16 type support to NeonConstantWorkload and NeonTensorHandle
* Add ConvertBf16ToFp32Weight when ConvertBf16ToFp32Layer is added
* Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Id5b44a203add5e0c98c1ca4e2162115741b56644
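
As context for the change below, here is a minimal sketch of how a caller might query the two new conversion checks. Only the IsConvertBf16ToFp32Supported/IsConvertFp32ToBf16Supported signatures come from the diff itself; the include paths, the default-constructed NeonLayerSupport, and the Optional<std::string&> wrapping are assumptions for illustration.

// Minimal sketch (not part of the patch): query the new BF16 conversion checks.
// Include paths and default construction of NeonLayerSupport are assumed.
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <neon/NeonLayerSupport.hpp>

#include <iostream>
#include <string>

int main()
{
    armnn::NeonLayerSupport layerSupport;

    // Describe the same 1x2x2x2 tensor as BFloat16 and as Float32, mirroring
    // the input/output pair of a Bf16<->Fp32 conversion layer.
    const armnn::TensorShape shape({1, 2, 2, 2});
    const armnn::TensorInfo bf16Info(shape, armnn::DataType::BFloat16);
    const armnn::TensorInfo fp32Info(shape, armnn::DataType::Float32);

    std::string reason;
    armnn::Optional<std::string&> reasonIfUnsupported(reason);

    // With this patch applied, both directions report support unconditionally
    // (input, output and reason are ignored by the implementations).
    const bool bf16ToFp32 = layerSupport.IsConvertBf16ToFp32Supported(bf16Info, fp32Info, reasonIfUnsupported);
    const bool fp32ToBf16 = layerSupport.IsConvertFp32ToBf16Supported(fp32Info, bf16Info, reasonIfUnsupported);

    std::cout << "Bf16->Fp32 supported: " << bf16ToFp32 << "\n"
              << "Fp32->Bf16 supported: " << fp32ToBf16 << "\n";
    return 0;
}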
Diffstat (limited to 'src/backends/neon/NeonLayerSupport.cpp')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 20
1 file changed, 20 insertions, 0 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c01a178f18..44e84fb974 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -259,6 +259,16 @@ bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
&TrueFunc<>);
}
+bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
+ return true;
+}
+
bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -269,6 +279,16 @@ bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
return true;
}
+bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
+ return true;
+}
+
bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
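
For completeness, a hedged sketch of the declarations the companion NeonLayerSupport.hpp would need for these definitions follows. That header is not part of this diff (which is limited to the .cpp file), so the base class, the override specifiers, and the EmptyOptional() defaults are assumptions modelled on the surrounding ArmNN layer-support interface.

// Hedged sketch, not taken from this diff: declarations assumed to live in
// src/backends/neon/NeonLayerSupport.hpp, matching the definitions above.
class NeonLayerSupport : public LayerSupportBase
{
public:
    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    // ... remaining IsLayerSupported overrides elided ...
};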