author      Mike Kelly <mike.kelly@arm.com>    2019-07-29 16:56:31 +0100
committer   mike.kelly <mike.kelly@arm.com>    2019-07-30 08:10:43 +0000
commit      0a87936e7261fc074797111d274c40fca17d9509 (patch)
tree        87d17283cf0930608bc2f4077b98fff8a56f68ab
parent      a5e2a458d6279560bbe24deafc12db286be2ca10 (diff)
download    android-nn-driver-0a87936e7261fc074797111d274c40fca17d9509.tar.gz
IVGCVSW-3591 Fixed unexpectedly skipped SUB tests
* A model that has inputs with different quantized scales is not compliant with HAL 1.1.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ifb8277d78f05b5ef017effa879322a08c0efc851
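In outline, SUB conversion now lives in a single HAL-agnostic template in ConversionUtils.hpp, and each HalPolicy forwards to it with its own HAL types, so HAL 1.2 models (which allow differently quantized inputs) take the same code path as HAL 1.1 ones. A minimal sketch of that delegation pattern, with signatures simplified and the driver's existing types (ConversionData, Operation, Model) assumed:

    // Shared, HAL-version-agnostic conversion helper (ConversionUtils.hpp)
    template<typename HalPolicy,
             typename Operation = typename HalPolicy::Operation,
             typename Model     = typename HalPolicy::Model>
    bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);

    // Each policy delegates, so hal_1_1 and hal_1_2 share one implementation:
    bool hal_1_1::HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
    {
        return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
    }

    bool hal_1_2::HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
    {
        return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
    }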
-rw-r--r--  1.1/HalPolicy.cpp     56
-rw-r--r--  1.2/HalPolicy.cpp      9
-rw-r--r--  1.2/HalPolicy.hpp      2
-rw-r--r--  ConversionUtils.hpp   63
4 files changed, 74 insertions(+), 56 deletions(-)
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index ab8224a0..93ee70e1 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -166,61 +166,7 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_1::HalPolicy::ConvertSub()");
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- // The FuseActivation parameter is always the input index 2
- // and it should be optional
- ActivationFn activationFunction;
- if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- return Fail("%s: Dynamic output tensors are not supported", __func__);
- }
-
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsSubtractionSupported,
- data.m_Backends,
- isSupported,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outputInfo);
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
- armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
- const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
- const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
- if (endLayer)
- {
- BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
- }
-
- return Fail("%s: ProcessActivation failed", __func__);
+ return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
}
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8dbfd897..8502640c 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -61,7 +61,6 @@ bool HandledByV1_1(V1_2::OperationType operationType)
case V1_1::OperationType::SPACE_TO_BATCH_ND:
case V1_1::OperationType::SQUEEZE:
case V1_1::OperationType::STRIDED_SLICE:
- case V1_1::OperationType::SUB:
case V1_1::OperationType::TRANSPOSE:
return true;
default:
@@ -161,6 +160,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertSoftmax(operation, model, data);
case V1_2::OperationType::SPACE_TO_DEPTH:
return ConvertSpaceToDepth(operation, model, data);
+ case V1_2::OperationType::SUB:
+ return ConvertSub(operation, model, data);
case V1_2::OperationType::TANH:
return ConvertTanH(operation, model, data);
case V1_2::OperationType::LSTM:
@@ -1003,6 +1004,12 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
+bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertSub()");
+ return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
+}
+
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 74683136..285a37f1 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -66,6 +66,8 @@ private:
static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 52bfd5cc..fc6d3653 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1738,4 +1738,67 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
+template<typename HalPolicy,
+ typename Operation = typename HalPolicy::Operation,
+ typename Operand = typename HalPolicy::Operand,
+ typename Model = typename HalPolicy::Model>
+bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+ if (!input0.IsValid() || !input1.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ // The FuseActivation parameter is always the input index 2
+ // and it should be optional
+ ActivationFn activationFunction;
+ if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsSubtractionSupported,
+ data.m_Backends,
+ isSupported,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outputInfo);
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+ armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+ const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+ const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+ if (endLayer)
+ {
+ BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+ }
+
+ return Fail("%s: ProcessActivation failed", __func__);
+}
+
+
} // namespace armnn_driver