diff options
-rw-r--r-- 1.2/HalPolicy.cpp | 86 +
-rw-r--r-- 1.2/HalPolicy.hpp |  2 +
-rw-r--r-- NnapiSupport.txt  |  1 +
3 files changed, 89 insertions(+), 0 deletions(-)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp index 019f5054..55df9dab 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -64,6 +64,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertLocalResponseNormalization(operation, model, data); case V1_2::OperationType::LOGISTIC: return ConvertLogistic(operation, model, data); + case V1_2::OperationType::LOG_SOFTMAX: + return ConvertLogSoftmax(operation, model, data); case V1_2::OperationType::LSTM: return ConvertLstm(operation, model, data); case V1_2::OperationType::MAX_POOL_2D: @@ -998,6 +1000,90 @@ bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, return ::ConvertLogistic<hal_1_2::HalPolicy>(operation, model, data); } +bool HalPolicy::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertLogSoftmax()"); + + LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Failed to read input 0", __func__); + } + + const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); + if (!output) + { + return Fail("%s: Failed to read output", __func__); + } + + const TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + // Determine data type of input tensor + OperandType inputType; + if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, inputType)) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + LogSoftmaxDescriptor descriptor; + + // Read beta + if (inputType == OperandType::TENSOR_FLOAT16) + { + Half fp16Beta; + if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT16, fp16Beta, model, data)) + { + return Fail("%s: Failed to read input 1 (FLOAT16)", __func__); + } + + descriptor.m_Beta = 
static_cast<float>(fp16Beta); + } + else if (inputType == OperandType::TENSOR_FLOAT32) + { + if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data)) + { + return Fail("%s: Failed to read input 1 (FLOAT32)", __func__); + } + } + else + { + return Fail("%s: Unsupported input tensor type: %d", __func__, inputType); + } + + // Read axis + if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_Axis, model, data)) + { + return Fail("%s: Failed to read input 2", __func__); + } + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsLogSoftmaxSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + descriptor); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor); + if (!layer) + { + return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__); + } + + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data); +} + bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()"); diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp index aa69f127..743ac11e 100644 --- a/1.2/HalPolicy.hpp +++ b/1.2/HalPolicy.hpp @@ -71,6 +71,8 @@ private: static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data); diff --git a/NnapiSupport.txt b/NnapiSupport.txt index 47a1574a..f895300c 100644 --- a/NnapiSupport.txt +++ b/NnapiSupport.txt @@ -31,6 +31,7 @@ L2_NORMALIZATION (FLOAT32) L2_POOL_2D (FLOAT32,QUANT8_ASYMM) 
LOCAL_RESPONSE_NORMALIZATION (FLOAT32) LOGISTIC (FLOAT32,QUANT8_ASYMM) +LOG_SOFTMAX (FLOAT32) LSTM (FLOAT32) MAXIMUM (FLOAT32,QUANT8_ASYMM) MAX_POOL_2D (FLOAT32,QUANT8_ASYMM)