From ea54a01f6bd30f013cbe88ae1751985bc86b6af5 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 16 Mar 2020 16:36:10 +0000
Subject: IVGCVSW-4516 Add ConvertFp32ToBf16Layer and Ref workload support

Signed-off-by: Narumol Prangnawarat
Change-Id: I9099a4f840fb747336f77d20a0868b64e801a310
---
 src/backends/reference/RefLayerSupport.cpp | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a4f4efd92a..9f22b9ef0e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -411,6 +411,21 @@ bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                           &FalseFuncU8<>));
 }
 
+bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    bool supported = true;
+
+    supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
+                                  "Reference for ConvertFp32ToBf16 layer: input type not supported");
+
+    supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
+                                  "Reference for ConvertFp32ToBf16 layer: output type not supported");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
-- 
cgit v1.2.1
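
Usage note (not part of the patch): a minimal sketch of how the new support query might be exercised against the reference backend. It assumes the public ArmNN headers (armnn/Tensor.hpp, armnn/Types.hpp, armnn/Optional.hpp) plus src/backends/reference/RefLayerSupport.hpp are on the include path; the tensor shape and include paths are illustrative, not prescribed by the patch.

// Sketch only: ask the reference backend whether a Float32 -> BFloat16
// conversion is supported, mirroring the two rules added above.
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

#include "RefLayerSupport.hpp" // assumed path: src/backends/reference/

int main()
{
    armnn::RefLayerSupport layerSupport;

    // Illustrative 1x2x2x2 tensors; the rules only inspect the data types.
    const armnn::TensorInfo input({1, 2, 2, 2}, armnn::DataType::Float32);
    const armnn::TensorInfo output({1, 2, 2, 2}, armnn::DataType::BFloat16);

    std::string reason;
    const bool supported = layerSupport.IsConvertFp32ToBf16Supported(
        input, output, armnn::Optional<std::string&>(reason));

    std::cout << (supported ? std::string("supported")
                            : std::string("not supported: ") + reason)
              << std::endl;
    return 0;
}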