diff options
-rw-r--r-- | 1.3/HalPolicy.cpp       | 15
-rw-r--r-- | 1.3/HalPolicy.hpp       |  5
-rw-r--r-- | ConversionUtils_1_3.hpp | 73
-rw-r--r-- | NnapiSupport.txt        |  3
4 files changed, 96 insertions(+), 0 deletions(-)
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp index 37deec3d..5e456b8a 100644 --- a/1.3/HalPolicy.cpp +++ b/1.3/HalPolicy.cpp @@ -81,6 +81,12 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual); case V1_3::OperationType::LOCAL_RESPONSE_NORMALIZATION: return ConvertLocalResponseNormalization(operation, model, data); + case V1_3::OperationType::LOGICAL_AND: + return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd); + case V1_3::OperationType::LOGICAL_NOT: + return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot); + case V1_3::OperationType::LOGICAL_OR: + return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr); case V1_3::OperationType::LOGISTIC: return ConvertLogistic(operation, model, data); case V1_3::OperationType::LOG_SOFTMAX: @@ -314,6 +320,15 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation, return ::ConvertLocalResponseNormalization<hal_1_3::HalPolicy>(operation, model, data); } +bool HalPolicy::ConvertLogicalBinary(const Operation& operation, + const Model& model, + ConversionData& data, + armnn::LogicalBinaryOperation logicalOperation) +{ + ALOGV("hal_1_3::HalPolicy::ConvertLogicalBinary()"); + return ::ConvertLogicalBinary<hal_1_3::HalPolicy>(operation, model, data, logicalOperation); +} + bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_3::HalPolicy::ConvertLogistic()"); diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp index 0eb5f4d7..f82a5ef7 100644 --- a/1.3/HalPolicy.hpp +++ b/1.3/HalPolicy.hpp @@ -95,6 +95,11 @@ private: const Model& model, ConversionData& data); + static bool ConvertLogicalBinary(const Operation& operation, + const Model& model, + ConversionData& data, + armnn::LogicalBinaryOperation logicalOperation); + static bool 
ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data); diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp index a7f00fc3..150735e9 100644 --- a/ConversionUtils_1_3.hpp +++ b/ConversionUtils_1_3.hpp @@ -153,6 +153,79 @@ bool ConvertFill(const HalOperation& operation, const HalModel& model, Conversio template<typename HalPolicy, typename HalOperation = typename HalPolicy::Operation, typename HalModel = typename HalPolicy::Model> +bool ConvertLogicalBinary(const HalOperation& operation, + const HalModel& model, + ConversionData& data, + LogicalBinaryOperation logicalOperation) +{ + using HalOperand = typename HalPolicy::Operand; + + ALOGV("HalPolicy::ConvertLogicalBinary()"); + ALOGV("logicalOperation = %s", GetLogicalBinaryOperationAsCString(logicalOperation)); + + LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data); + LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data); + + if (!(input0.IsValid() && input1.IsValid())) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const TensorInfo& inputInfo0 = input0.GetTensorInfo(); + const TensorInfo& inputInfo1 = input1.GetTensorInfo(); + const TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + LogicalBinaryDescriptor descriptor(logicalOperation); + + bool isSupported = false; + + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsLogicalBinarySupported, + data.m_Backends, + isSupported, + inputInfo0, + inputInfo1, + outputInfo, + descriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { 
+ isSupported = AreDynamicTensorsSupported(); + } + + if (!isSupported) + { + return false; + } + + IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor); + assert(layer != nullptr); + + bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data); + if (!isReshapeSupported) + { + return false; + } + + return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc); +} + +template<typename HalPolicy, + typename HalOperation = typename HalPolicy::Operation, + typename HalModel = typename HalPolicy::Model> bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data) { using HalOperand = typename HalPolicy::Operand; diff --git a/NnapiSupport.txt b/NnapiSupport.txt index 2b6eaca0..e0400e1f 100644 --- a/NnapiSupport.txt +++ b/NnapiSupport.txt @@ -43,6 +43,9 @@ L2_POOL_2D (FLOAT32, QUANT8_ASYMM) LESS (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED) LESS_EQUAL (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED) LOCAL_RESPONSE_NORMALIZATION (FLOAT32) +LOGICAL_AND (BOOL8) +LOGICAL_NOT (BOOL8) +LOGICAL_OR (BOOL8) LOGISTIC (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED) LOG_SOFTMAX (FLOAT32) LSTM (FLOAT32) |