diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2020-11-16 14:27:52 +0000 |
---|---|---|
committer | Jim Flynn <jim.flynn@arm.com> | 2020-11-17 12:21:42 +0000 |
commit | aa41d5d2f43790938f3a32586626be5ef55b6ca9 (patch) | |
tree | 9b6daa8b9b80b7c6d939c894d1ae3715a565bdac /src/armnn/optimizations | |
parent | 7c0e3fd7596abde1cf2eca96147e1c37491e1816 (diff) | |
download | armnn-aa41d5d2f43790938f3a32586626be5ef55b6ca9.tar.gz |
IVGCVSW-5530 'Cannot run SSD Mobilenet f16/uint8 on CpuRef via ExecuteNetwork'
* Added FP16 DataType support to DetectionPostProcess
* For the DetectionPostProcess layer, the output is always Float32 regardless of the input type
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I21f63dd08f0863e9a98e105b3009bab3da1ab0c3
Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r-- | src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp | 33 |
1 file changed, 21 insertions, 12 deletions
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp
index 9658a35560..6aa618f7b4 100644
--- a/src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp
+++ b/src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp
@@ -28,13 +28,18 @@ public:
         }
         else if (layer.GetType() == LayerType::Output)
         {
-            // if the inputs of this layer are DataType::Float32
-            // add a ConvertFloat16ToFloat32 layer before each of the inputs
-            if (layer.GetDataType() == DataType::Float32)
+            // For DetectionPostProcess Layer output is always Float32 regardless of input type
+            Layer& connectedLayer = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayer();
+            if (connectedLayer.GetType() != LayerType::DetectionPostProcess)
             {
-                // NOTE: We need to call InsertConvertFp16ToFp32LayersBefore with expectCorrectInputType = false
-                // here, otherwise it will expect the inputs to be DataType::Float16
-                InsertConvertFp16ToFp32LayersBefore(graph, layer, false);
+                // if the inputs of this layer are DataType::Float32
+                // add a ConvertFloat16ToFloat32 layer before each of the inputs
+                if (layer.GetDataType() == DataType::Float32)
+                {
+                    // NOTE: We need to call InsertConvertFp16ToFp32LayersBefore with expectCorrectInputType = false
+                    // here, otherwise it will expect the inputs to be DataType::Float16
+                    InsertConvertFp16ToFp32LayersBefore(graph, layer, false);
+                }
             }
         }
         else if (layer.GetType() != LayerType::ConvertFp32ToFp16 && layer.GetType() != LayerType::ConvertFp16ToFp32)
@@ -57,14 +62,18 @@ public:
                 }
             }

-            // change outputs to DataType::Float16
-            for (auto&& output = layer.BeginOutputSlots(); output != layer.EndOutputSlots(); ++output)
+            // For DetectionPostProcess Layer output is always Float32 regardless of input type
+            if (layer.GetType() != LayerType::DetectionPostProcess)
             {
-                TensorInfo convertInfo = output->GetTensorInfo();
-                if (convertInfo.GetDataType() == DataType::Float32)
+                // change outputs to DataType::Float16
+                for (auto&& output = layer.BeginOutputSlots(); output != layer.EndOutputSlots(); ++output)
                 {
-                    convertInfo.SetDataType(DataType::Float16);
-                    output->SetTensorInfo(convertInfo);
+                    TensorInfo convertInfo = output->GetTensorInfo();
+                    if (convertInfo.GetDataType() == DataType::Float32)
+                    {
+                        convertInfo.SetDataType(DataType::Float16);
+                        output->SetTensorInfo(convertInfo);
+                    }
                 }
             }
         }