about | summary | refs | log | tree | commit | diff
path: root/1.1
diff options
context:
space:
mode:
author: saoste01 <saoirse.stewart@arm.com>  2018-10-18 17:49:56 +0100
committer: Matthew Bentham <matthew.bentham@arm.com>  2018-10-22 17:25:39 +0100
commit: fe463150228156c29a415f45d2172a43df6ce6c3 (patch)
tree: 39496d6ab97ea631613ee5c0dbf33f7c3a6aafb1 /1.1
parent: 5e0ed9fb1e86a45617c65376c4756b8317798e5e (diff)
download: android-nn-driver-fe463150228156c29a415f45d2172a43df6ce6c3.tar.gz
IVGCVSW-2020: Add converter method for TRANSPOSE to the V1.1 section of HalPolicy
* Updating SQUEEZE with comments from TRANSPOSE

Change-Id: Iec48c5d583fecc5e72082d7de0fc8b1becfd84d7
Diffstat (limited to '1.1')
-rw-r--r--  1.1/HalPolicy.cpp  91
-rw-r--r--  1.1/HalPolicy.hpp   1
2 files changed, 84 insertions(+), 8 deletions(-)
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index d241290f..9ca37ec0 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -35,6 +35,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertPad(operation, model, data);
case V1_1::OperationType::SQUEEZE:
return ConvertSqueeze(operation, model, data);
+ case V1_1::OperationType::TRANSPOSE:
+ return ConvertTranspose(operation, model, data);
default:
return Fail("%s: Operation type %s not supported in ArmnnDriver",
__func__, toString(operation.type).c_str());
@@ -276,7 +278,6 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
- static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
if (!input.IsValid())
@@ -287,20 +288,22 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
unsigned int rank = inputInfo.GetNumDimensions();
- if( rank > 4 )
+ if (rank > 4)
{
- Fail("%s: Inputs with rank greater than: %i are not supported", __func__, rank);
+ Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
}
// NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
// if the operand index is out of bounds.
const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
+ const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
std::vector<int32_t> axis;
- if(!axisOperand)
+ if (!axisOperand)
{
axis.assign(dimensionSequence,
- dimensionSequence+rank);
+ dimensionSequence + rank);
}
else
{
@@ -309,7 +312,7 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
std::vector<uint32_t> outputDims;
- for(unsigned int i = 0; i < rank; i++)
+ for (unsigned int i = 0; i < rank; i++)
{
bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
auto currentDimension = inputInfo.GetShape()[i];
@@ -319,7 +322,7 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
}
}
- armnn::TensorShape outShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+ armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
armnn::TensorInfo outputInfo = inputInfo;
outputInfo.SetShape(outShape);
@@ -344,7 +347,79 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
- layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+ if (!input.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+ unsigned int rank = inputInfo.GetNumDimensions();
+ if (rank > 4)
+ {
+ Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+ }
+
+ // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
+ // if the operand index is out of bounds.
+ const Operand* permOperand = GetInputOperand(operation, 1, model, false);
+
+ std::vector<int32_t> perm(rank);
+ if (!permOperand)
+ {
+ // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
+ for (unsigned int i = rank; i > 0; i--)
+ {
+ perm[rank - i] = boost::numeric_cast<int> (i - 1);
+ }
+ }
+ else
+ {
+ GetTensorInt32Values(*permOperand, perm, model, data);
+ }
+
+ std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
+
+ auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
+ if (!permutationVector.IsEqual(NHWCToArmNN)
+ && !permutationVector.IsEqual(ArmNNToNHWC)
+ && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
+ {
+ return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
+ }
+
+ armnn::PermuteDescriptor permuteDesc;
+ permuteDesc.m_DimMappings = permutationVector;
+
+ const Operand* output = GetOutputOperand(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (!IsLayerSupported(__func__,
+ armnn::IsPermuteSupported,
+ data.m_Compute,
+ inputInfo,
+ outputInfo,
+ permuteDesc))
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
+ assert(layer != nullptr);
+ input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index 06cc5743..b08ac50e 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -30,6 +30,7 @@ private:
static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
};
} // namespace hal_1_1