From 0a87936e7261fc074797111d274c40fca17d9509 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Mon, 29 Jul 2019 16:56:31 +0100
Subject: IVGCVSW-3591 Fixed unexpectedly skipped SUB tests

* A model that has Inputs with different quantized scales is not compliant with 1.1

Signed-off-by: Mike Kelly
Change-Id: Ifb8277d78f05b5ef017effa879322a08c0efc851
---
 1.1/HalPolicy.cpp   | 56 +----------------------------------------------
 1.2/HalPolicy.cpp   |  9 +++++++-
 1.2/HalPolicy.hpp   |  2 ++
 ConversionUtils.hpp | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 74 insertions(+), 56 deletions(-)

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index ab8224a0..93ee70e1 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -166,61 +166,7 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
 bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSub()");
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsSubtractionSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input0.GetTensorInfo(),
-                               input1.GetTensorInfo(),
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer)
-    {
-        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-
-    return Fail("%s: ProcessActivation failed", __func__);
+    return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8dbfd897..8502640c 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -61,7 +61,6 @@ bool HandledByV1_1(V1_2::OperationType operationType)
         case V1_1::OperationType::SPACE_TO_BATCH_ND:
         case V1_1::OperationType::SQUEEZE:
         case V1_1::OperationType::STRIDED_SLICE:
-        case V1_1::OperationType::SUB:
         case V1_1::OperationType::TRANSPOSE:
             return true;
         default:
@@ -161,6 +160,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertSoftmax(operation, model, data);
         case V1_2::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
+        case V1_2::OperationType::SUB:
+            return ConvertSub(operation, model, data);
         case V1_2::OperationType::TANH:
             return ConvertTanH(operation, model, data);
         case V1_2::OperationType::LSTM:
@@ -1003,6 +1004,12 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
+    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 74683136..285a37f1 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -66,6 +66,8 @@ private:
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 52bfd5cc..fc6d3653 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1738,4 +1738,67 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Operand   = typename HalPolicy::Operand,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSubtractionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+
 } // namespace armnn_driver
--
cgit v1.2.1