From 358da71e834bb1e66a528a7e7ffd1f97087e17c5 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 13 Dec 2019 11:54:00 +0000
Subject: MLCE-144 Disabling NCHW and Float16

* Disabled support for NCHW
* Disabled support for Float16

Signed-off-by: Mike Kelly
Change-Id: I0f5ee8a8ddda1edc7800f167d6a82e4a15c6069b
---
 1.2/HalPolicy.cpp   | 23 +++++++++++++++++++++++
 ConversionUtils.hpp | 21 ++++++++++++++++++++-
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index ed963c4d..3b1200b5 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -172,6 +172,10 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
         desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
     }
 
+    if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+    }
     const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
 
     // ArmNN does not currently support non-fixed weights or bias
@@ -335,6 +339,10 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
     const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
     desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);
 
+    if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+    }
     armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
     unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
     unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
@@ -1082,6 +1090,11 @@ bool HalPolicy::ConvertResize(const Operation& operation,
     descriptor.m_Method = resizeMethod;
     descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
 
+    if (descriptor.m_DataLayout == armnn::DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+    }
+
     OperandType operandType1;
     OperandType operandType2;
 
@@ -1214,6 +1227,11 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
 
     desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
 
+    if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+    }
+
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsSpaceToDepthSupported,
@@ -1809,6 +1827,11 @@ bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model&
         desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
     }
 
+    if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+    }
+
     armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
     unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
     unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 38253b0a..0cca619e 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -181,7 +181,6 @@ inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
 inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
 {
     return type == V1_2::OperandType::BOOL ||
-           type == V1_2::OperandType::TENSOR_FLOAT16 ||
            type == V1_2::OperandType::TENSOR_FLOAT32 ||
            type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
            type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
@@ -1430,6 +1429,11 @@ bool ConvertPooling2d(const HalOperation& operation,
         if (Is12Operand(*output))
         {
             desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
+
+            if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+            {
+                return Fail("%s: Operation has invalid inputs NCHW is not supported", operationName);
+            }
         }
     }
     else
@@ -1449,6 +1453,11 @@ bool ConvertPooling2d(const HalOperation& operation,
         if (Is12Operand(*output))
         {
             desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
+
+            if (desc.m_DataLayout == armnn::DataLayout::NCHW)
+            {
+                return Fail("%s: Operation has invalid inputs NCHW is not supported", operationName);
+            }
         }
 
         const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
@@ -3205,6 +3214,11 @@ bool ConvertBatchToSpaceNd(const HalOperation& operation,
     if (Is12Operand(*output))
     {
         batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
+
+        if (batchToSpaceNdDesc.m_DataLayout == armnn::DataLayout::NCHW)
+        {
+            return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+        }
     }
     // Setting crops to 0,0 0,0 as it is not supported in Android NN API
     batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
@@ -3307,6 +3321,11 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model,
     if (Is12Operand(*output))
     {
         descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
+
+        if (descriptor.m_DataLayout == armnn::DataLayout::NCHW)
+        {
+            return Fail("%s: Operation has invalid inputs NCHW is not supported", __func__);
+        }
    }

     bool isSupported = false;
-- 
cgit v1.2.1