aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@arm.com>2020-10-08 14:33:13 +0100
committerFinn Williams <Finn.Williams@arm.com>2020-10-09 11:57:28 +0100
commitb0331179a3779a967d5081dffdc2046f14f194d1 (patch)
tree59416f9bbd38f86196fd51766def4b482b22dfe8
parent9bf0d32afa417d94dccc2e91e9ed50230c346ac6 (diff)
downloadandroid-nn-driver-b0331179a3779a967d5081dffdc2046f14f194d1.tar.gz
IVGCVSW-5357 Fix skipping Dynamic Grouped Conv2d tests
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: If1be1fcd289e83661a0ad03574a36cbbd9266c88
-rw-r--r--ConversionUtils_1_2.hpp92
1 file changed, 57 insertions, 35 deletions
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index e4bc05f9..256be4a5 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -805,7 +805,7 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
{
return Fail("%s: Could not read output 0", __func__);
}
- const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ TensorInfo outputInfo = GetTensorInfoForOperand(*output);
// Look ahead to determine data layout
DataLayout dataLayout = DataLayout::NHWC;
@@ -896,7 +896,8 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
return Fail("%s: Unsupported number of operation inputs", __func__);
}
- const unsigned int outputChannels = outputShape[channelsIndex];
+ // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
+ const unsigned int outputChannels = weightsShape[0];
const unsigned int channelsPerGroup = weightsShape[channelsIndex];
const unsigned int channelMultiplier = outputChannels / numGroups;
@@ -971,9 +972,6 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
TensorShape groupInputShape(inputShape);
groupInputShape[channelsIndex] = channelsPerGroup;
- TensorShape groupOutputShape(outputShape);
- groupOutputShape[channelsIndex] = 1;
-
TensorShape groupWeightsShape(weightsShape);
groupWeightsShape[0] /= channelMultiplier * numGroups;
@@ -992,6 +990,13 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
groupBiasesInfo.SetShape(groupBiasesShape);
TensorInfo groupOutputInfo(outputInfo);
+
+ TensorShape groupOutputShape(outputShape);
+ const bool isDynamic = IsDynamicTensor(outputInfo);
+ if (!isDynamic)
+ {
+ groupOutputShape[channelsIndex] = 1;
+ }
groupOutputInfo.SetShape(groupOutputShape);
const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
@@ -1031,15 +1036,28 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
biasesDataOffset));
isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsConvolution2dSupported,
- data.m_Backends,
- isSupported,
- groupInputInfo,
- groupOutputInfo,
- desc,
- groupWeightsInfo,
- Optional<TensorInfo>(groupBiasesInfo));
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsConvolution2dSupported,
+ data.m_Backends,
+ isSupported,
+ groupInputInfo,
+ outputInfo,
+ desc,
+ groupWeightsInfo,
+ Optional<TensorInfo>(groupBiasesInfo));
+ };
+
+ if(!isDynamic)
+ {
+ validateFunc(groupOutputInfo, isSupported);
+ }
+ else
+ {
+ isSupported = AreDynamicTensorsSupported();
+ }
+
if (!isSupported)
{
return false;
@@ -1055,6 +1073,20 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
+ if(isDynamic)
+ {
+ convLayer->GetOutputSlot(0).IsTensorInfoSet();
+
+ validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
+
+ outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
+
+ if (!isSupported)
+ {
+ return false;
+ }
+ }
+
convLayers[index] = convLayer;
}
}
@@ -1062,7 +1094,9 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
//
// Set up Concat layer
//
- ConcatDescriptor concatDescriptor(outputInfo.GetShape()[channelsIndex]);
+ ConcatDescriptor concatDescriptor;
+ // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
+ concatDescriptor = ConcatDescriptor(weightsShape[0]);
for (unsigned int group = 0u; group < numGroups; ++group)
{
for (unsigned int m = 0u; m < channelMultiplier; ++m)
@@ -1074,25 +1108,13 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
}
isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsConcatSupported,
- data.m_Backends,
- isSupported,
- std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
- outputInfo,
- concatDescriptor);
- };
-
- if(!IsDynamicTensor(outputInfo))
- {
- validateFunc(outputInfo, isSupported);
- }
- else
- {
- isSupported = AreDynamicTensorsSupported();
- }
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsConcatSupported,
+ data.m_Backends,
+ isSupported,
+ std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
+ outputInfo,
+ concatDescriptor);
if (!isSupported)
{
@@ -1116,7 +1138,7 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model,
concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *concatLayer, model,
- data, nullptr, validateFunc, activation);
+ data, nullptr, nullptr, activation);
}
template<typename HalPolicy,