author    Sadik Armagan <sadik.armagan@arm.com>        2019-07-25 09:09:40 +0100
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> 2019-07-25 11:31:48 +0100
commit    6111316eb609bd71589b963cf6fc56b18ba3d241 (patch)
tree      105d8a0aa72d18cfc246b37911d460f747c13220
parent    c921f6baf18ca05f14a41097b4e075e2d4fc7479 (diff)
download  android-nn-driver-6111316eb609bd71589b963cf6fc56b18ba3d241.tar.gz
IVGCVSW-3530 Fix DynamicOutput Tests for Android Q NeuralNetworks 1.0 & 1.1
* Fixed failing Conv2d, DepthwiseConv2d, and Activation tests on HAL 1.0 and 1.1 in Q

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Aron Virginas-Tar <aron.virginas-tar@arm.com>
Change-Id: I435338b90b6c501320083f2fd9372e3a4ac3c32c
-rw-r--r--  1.0/HalPolicy.cpp    29
-rw-r--r--  1.0/HalPolicy.hpp     4
-rw-r--r--  1.2/HalPolicy.cpp    36
-rw-r--r--  1.2/HalPolicy.hpp     8
-rw-r--r--  ConversionUtils.hpp  95
5 files changed, 117 insertions(+), 55 deletions(-)
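At a glance, the refactor pulls the per-activation descriptor setup out of the HAL-specific policies and into shared function templates in ConversionUtils.hpp, parameterised on the HAL policy; both hal_1_0 and hal_1_2 then delegate to the same implementation. A minimal sketch of the delegation pattern, using names from the diff below (the surrounding HalPolicy scaffolding is abbreviated):

// ConversionUtils.hpp: one shared template per activation function.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;
    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

// 1.0/HalPolicy.cpp: the policy method reduces to a one-line wrapper.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}

On the 1.2 side, RELU, RELU1, RELU6, and TANH are also removed from the HandledByV1_0 fallback list so that the 1.2 ConvertOperation dispatches them through new wrappers of the same shape.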
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index b9200f14..7a54e74f 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -1142,34 +1142,19 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
-
- armnn::ActivationDescriptor desc;
- desc.m_Function = armnn::ActivationFunction::ReLu;
-
- return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+ return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
-
- armnn::ActivationDescriptor desc;
- desc.m_Function = armnn::ActivationFunction::BoundedReLu;
- desc.m_A = 1.0f;
- desc.m_B = -1.0f;
-
- return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+ return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
}
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
-
- armnn::ActivationDescriptor desc;
- desc.m_Function = armnn::ActivationFunction::BoundedReLu;
- desc.m_A = 6.0f;
-
- return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+ return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
}
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
@@ -1285,13 +1270,7 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
-
- armnn::ActivationDescriptor desc;
- desc.m_Function = armnn::ActivationFunction::TanH;
- desc.m_A = 1.0f; // android nn does not support tanH parameters
- desc.m_B = 1.0f; // set to 1.0f for unity scaling
-
- return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+ return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index 833017b5..4b8dc47a 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -70,14 +70,14 @@ private:
static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ValidateConv2dParameters(const Operation& operation);
static bool ValidateDepthwiseConv2dParameters(const Operation& operation);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 2d6d7970..5f327c20 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -42,14 +42,10 @@ bool HandledByV1_0(V1_2::OperationType operationType)
case V1_0::OperationType::LSTM:
case V1_0::OperationType::MAX_POOL_2D:
case V1_0::OperationType::MUL:
- case V1_0::OperationType::RELU:
- case V1_0::OperationType::RELU1:
- case V1_0::OperationType::RELU6:
case V1_0::OperationType::RESHAPE:
case V1_0::OperationType::RNN:
case V1_0::OperationType::SPACE_TO_DEPTH:
case V1_0::OperationType::SVDF:
- case V1_0::OperationType::TANH:
case V1_0::OperationType::OEM_OPERATION:
return true;
default:
@@ -151,12 +147,20 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
return ConvertPrelu(operation, model, data);
+ case V1_2::OperationType::RELU:
+ return ConvertReLu(operation, model, data);
+ case V1_2::OperationType::RELU1:
+ return ConvertReLu1(operation, model, data);
+ case V1_2::OperationType::RELU6:
+ return ConvertReLu6(operation, model, data);
case V1_2::OperationType::RESIZE_BILINEAR:
return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
case V1_2::OperationType::SOFTMAX:
return ConvertSoftmax(operation, model, data);
+ case V1_2::OperationType::TANH:
+ return ConvertTanH(operation, model, data);
default:
return Fail("%s: Operation type %s not supported in ArmnnDriver",
__func__, toString(operation.type).c_str());
@@ -779,6 +783,24 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
armnn::Optional<armnn::TensorInfo>(outputInfo));
}
+bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
+ return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
+ return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
+ return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
+}
+
bool HalPolicy::ConvertResize(const Operation& operation,
const Model& model,
ConversionData& data,
@@ -1030,5 +1052,11 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
armnn::Optional<armnn::TensorInfo>(outputInfo));
}
+bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
+ return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
+}
+
} // namespace hal_1_2
} // namespace armnn_driver
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index bac765da..d11ae3ca 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -45,6 +45,12 @@ private:
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertResize(const Operation& operation,
const Model& model,
ConversionData& data,
@@ -53,6 +59,8 @@ private:
static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
};
} // namespace hal_1_2
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index f84dc108..790382d6 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -194,6 +194,11 @@ inline bool IsBool(V1_0::Operand)
return false;
}
+inline bool Is12Operand(V1_0::Operand)
+{
+ return false;
+}
+
#ifdef ARMNN_ANDROID_NN_V1_2
inline bool IsBool(V1_2::Operand operand)
@@ -201,6 +206,12 @@ inline bool IsBool(V1_2::Operand operand)
return operand.type == V1_2::OperandType::BOOL;
}
+/// Checks if an operand is a V1_2 Operand
+inline bool Is12Operand(V1_2::Operand)
+{
+ return true;
+}
+
#endif
template<typename LayerHandleType>
@@ -1161,8 +1172,15 @@ bool ConvertToActivation(const HalOperation& operation,
armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicTensor(outInfo))
{
- ALOGD("Output shape not set, will infer from input");
- outInfo.SetShape(input.GetTensorInfo().GetShape());
+ if (Is12Operand(*outputOperand))
+ {
+ ALOGD("Output shape not set, will infer from input");
+ outInfo.SetShape(input.GetTensorInfo().GetShape());
+ }
+ else
+ {
+ return Fail("%s: Dynamic OutputShapes are not supported in this HAL version", __func__);
+ }
}
bool isSupported = false;
@@ -1190,6 +1208,55 @@ bool ConvertToActivation(const HalOperation& operation,
}
template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ armnn::ActivationDescriptor desc;
+ desc.m_Function = armnn::ActivationFunction::ReLu;
+
+ return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ armnn::ActivationDescriptor desc;
+ desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ desc.m_A = 1.0f;
+ desc.m_B = -1.0f;
+
+ return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ armnn::ActivationDescriptor desc;
+ desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ desc.m_A = 6.0f;
+
+ return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ armnn::ActivationDescriptor desc;
+ desc.m_Function = armnn::ActivationFunction::TanH;
+ desc.m_A = 1.0f; // android nn does not support tanH parameters
+ desc.m_B = 1.0f; // set to 1.0f for unity scaling
+
+ return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
@@ -1420,17 +1487,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers
if (IsDynamicTensor(outputInfo))
{
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
- weights.GetInfo().GetShape(),
- desc));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
+ return Fail("%s: Dynamic OutputShapes are not supported", __func__);
}
bool isSupported = false;
@@ -1600,17 +1657,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
if (IsDynamicTensor(outputInfo))
{
- try
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
- weights.GetInfo().GetShape(),
- desc));
- }
- catch (armnn::Exception& e)
- {
- return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
- }
+ return Fail("%s: Dynamic OutputShapes are not supported", __func__);
}
bool isSupported = false;
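The dynamic-shape gating in ConvertToActivation relies on compile-time overload resolution: Is12Operand has one overload per HAL operand type, so the same template body behaves differently for 1.0/1.1 and 1.2 models without any runtime version flag. A condensed sketch of the idiom as it appears in the patch (error handling around the operand lookup elided):

// Overloads resolve statically on the operand type: V1_0 operands never
// carry dynamic output shapes, V1_2 operands may.
inline bool Is12Operand(V1_0::Operand) { return false; }

#ifdef ARMNN_ANDROID_NN_V1_2
inline bool Is12Operand(V1_2::Operand) { return true; }
#endif

// Call site: only infer a missing output shape for 1.2 operands; older HAL
// versions fail the conversion instead of guessing.
if (IsDynamicTensor(outInfo))
{
    if (Is12Operand(*outputOperand))
    {
        ALOGD("Output shape not set, will infer from input");
        outInfo.SetShape(input.GetTensorInfo().GetShape());
    }
    else
    {
        return Fail("%s: Dynamic OutputShapes are not supported in this HAL version", __func__);
    }
}

Note that ConvertConv2d and ConvertDepthwiseConv2d take the stricter route: they drop the previous shape-inference attempt entirely and reject dynamic output shapes outright.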