author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-11-13 15:16:28 +0000
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-11-15 16:54:47 +0000
commit    87972be8d838f6fde6f6e98dd81c422e85457a5e (patch)
tree      78e8a9abfefc6db67f9a71f6c1fddb0444daac5f /src/backends/backendsCommon/WorkloadFactory.cpp
parent    5716de25c6981d004e32b81dc65b4869eda25f7c (diff)
IVGCVSW-4119 Fix FP16 to FP32 fallback mechanism in optimizer to work with Dequantize
* Check for output data type as well as input data type when determining
  whether we should attempt to fall back to FP32 if FP16 is not supported
* Override output type for Dequantize in IsLayerSupported() instead of
  input type
* Updated original input type from FP16 to FP32 in
  InsertConvertFp32ToFp16LayersAfter()

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ic6477fd17cea5a91bd8bf9ae0cf836520897d5b7
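A minimal sketch, not ArmNN's actual implementation: the names TensorInfo, DataType, OverrideDataType and IsDequantizeSupported below mirror ArmNN's, but their bodies are simplified assumptions used only to show why the FP16/FP32 fallback probe must substitute the output type rather than the input type for Dequantize.

    #include <iostream>

    enum class DataType { Float16, Float32, QAsymmU8 };

    struct TensorInfo { DataType type; };

    // Substitute the probed data type, mimicking the optimizer's fallback probe.
    TensorInfo OverrideDataType(const TensorInfo& info, DataType dataType)
    {
        return TensorInfo{dataType};
    }

    // Dequantize is only valid for a quantized input and a float output.
    bool IsDequantizeSupported(const TensorInfo& input, const TensorInfo& output)
    {
        return input.type == DataType::QAsymmU8 &&
               (output.type == DataType::Float32 || output.type == DataType::Float16);
    }

    int main()
    {
        TensorInfo input{DataType::QAsymmU8};   // quantized input stays quantized
        TensorInfo output{DataType::Float16};   // network produced an FP16 output
        DataType probe = DataType::Float32;     // optimizer probes the FP32 fallback

        // Old (buggy) probe: overriding the input turns it into a float tensor,
        // so the check rejects a combination the backend could actually run.
        std::cout << IsDequantizeSupported(OverrideDataType(input, probe), output) << '\n'; // 0

        // Fixed probe: override the output instead; the input keeps its quantized
        // type and the support check answers the question the optimizer is asking.
        std::cout << IsDequantizeSupported(input, OverrideDataType(output, probe)) << '\n'; // 1
    }

Dequantize's input is inherently a quantized tensor, so rewriting its data type during the fallback probe asks the backend about a layer that can never exist; substituting the output type instead matches the network the optimizer's fallback would actually produce.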
Diffstat (limited to 'src/backends/backendsCommon/WorkloadFactory.cpp')
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 4a7f007c2e..9901dcb7c1 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -265,8 +265,8 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
-                                                               output,
+            result = layerSupportObject->IsDequantizeSupported(input,
+                                                               OverrideDataType(output, dataType),
                                                                reason);
             break;
         }
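For context, a sketch of the per-element mapping Dequantize performs, assuming the affine quantization scheme (scale and zero point) used by ArmNN's 8-bit asymmetric types; the input type is pinned by the quantization scheme, so only the output can participate in the FP16/FP32 choice:

    #include <cstdint>

    // Affine dequantization: real = scale * (quantized - zeroPoint).
    // scale and zeroPoint come from the input tensor's quantization parameters;
    // only the floating-point width of the result (FP16 vs FP32) can vary.
    float Dequantize(std::uint8_t quantized, float scale, std::int32_t zeroPoint)
    {
        return scale * static_cast<float>(static_cast<std::int32_t>(quantized) - zeroPoint);
    }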