author    Matteo Martincigh <matteo.martincigh@arm.com>  2019-07-02 16:53:10 +0100
committer Matteo Martincigh <matteo.martincigh@arm.com>  2019-07-05 12:58:25 +0100
commit    0bd89a87f15bf0983eace53df1160a3e64bc0e75 (patch)
tree      cb6343a43938b22685614b5242cd341aeca94174
parent    fb2fa29e83e5ed7cd8ddf90ffb95946e7498f365 (diff)
download  android-nn-driver-0bd89a87f15bf0983eace53df1160a3e64bc0e75.tar.gz
IVGCVSW-3370 Add broadcasting support to PReLU to properly run the Android VTS/NN tests

 * Updated ConvertPrelu to support input broadcasting
 * Updated the BroadcastTensor utility function so that it preserves the order of the inputs
 * Updated the documentation

Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Change-Id: Ibbac6f7161132740e61c85f597f8be70cd5d7325
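For context, a minimal standalone sketch of the shape arithmetic performed by the reworked BroadcastTensor (see ConversionUtils.hpp below): the lower-rank input's shape is left-padded with degenerate (size 1) dimensions until both inputs have the same rank. This is not driver code; PadToRank is a hypothetical helper using plain std::vector shapes instead of armnn::TensorShape.

    #include <cstddef>
    #include <vector>

    // Left-pad the lower-rank shape with 1s so both inputs end up with the
    // same rank before they are connected to the target layer.
    std::vector<unsigned int> PadToRank(const std::vector<unsigned int>& shape,
                                        std::size_t targetRank)
    {
        std::vector<unsigned int> padded(targetRank, 1u);
        const std::size_t offset = targetRank - shape.size();
        for (std::size_t i = 0; i < shape.size(); ++i)
        {
            padded[offset + i] = shape[i];
        }
        return padded;
    }

    // Example: for a PReLU input of shape { 1, 2, 2, 3 } and an alpha of shape
    // { 3 }, PadToRank({ 3 }, 4) yields { 1, 1, 1, 3 }, i.e. the shape given to
    // the reshape layer that BroadcastTensor inserts in front of the alpha input.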
-rw-r--r--  1.2/ArmnnDriverImpl.cpp    4
-rw-r--r--  1.2/HalPolicy.cpp          5
-rw-r--r--  ConversionUtils.hpp      125
-rw-r--r--  NnapiSupport.txt           2
4 files changed, 77 insertions, 59 deletions
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 87ef08c2..3b2cb744 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -80,7 +80,7 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime
const sp<V1_2::IPreparedModelCallback>& cb,
bool float32ToFloat16)
{
- ALOGV("ArmnnDriverImpl::prepareModel()");
+ ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_2()");
if (cb.get() == nullptr)
{
@@ -290,4 +290,4 @@ Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt
}
} // namespace hal_1_2
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 1c6159ef..e058e026 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -443,7 +443,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
if (!output)
{
- return Fail("%s: Could not read output 0", __func__);
+ return Fail("%s: Could not read output", __func__);
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
@@ -467,8 +467,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
return Fail("%s: AddPreluLayer failed", __func__);
}
- input.Connect(layer->GetInputSlot(0));
- alpha.Connect(layer->GetInputSlot(1));
+ BroadcastTensor(input, alpha, layer, *data.m_Network);
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 36bc4ae1..9a711cb7 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -201,55 +201,91 @@ inline bool IsBool(V1_2::Operand operand)
#endif
-void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
- armnn::INetwork& network)
+template<typename LayerHandleType>
+armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
+ armnn::TensorInfo reshapeInfo)
+{
+ armnn::ReshapeDescriptor reshapeDescriptor;
+ reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
+
+ armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
+ BOOST_ASSERT(reshapeLayer != nullptr);
+
+ // Attach the input layer to the reshape layer
+ inputLayer.Connect(reshapeLayer->GetInputSlot(0));
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
+
+ return *reshapeLayer;
+}
+
+void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
+ armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
BOOST_ASSERT(startLayer != nullptr);
- const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
- const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
- if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+ const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+ unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
+ unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
+
+ if (inputDimensions0 == inputDimensions1)
{
- // If the number of dimensions do not match then we need to add degenerate dimensions
- // to the "smaller" tensor using a reshape:
- // Small Big
- // | |
- // Reshape |
- // \ /
- // Add
- bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();
+ // The inputs have the same number of dimensions; simply connect them to the given layer as they are
+ input0.Connect(startLayer->GetInputSlot(0));
+ input1.Connect(startLayer->GetInputSlot(1));
- LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
- const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();
+ return;
+ }
- LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
- const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();
+ // Since the numbers of dimensions do not match, we need to add degenerate dimensions
+ // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
- const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
- std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
- unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
- for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
- {
- reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
- }
- armnn::TensorInfo reshapedInfo = smallTensorDims;
- reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
- reshapedDims.data() });
+ unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
+ unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
+ boost::numeric_cast<int>(inputDimensions1));
- armnn::ReshapeDescriptor reshapeDesc;
- reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
- armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
- smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+ bool input0IsSmaller = inputDimensions0 < inputDimensions1;
+ LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
+ const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
- // Connect the outputs from new reshape and original input layer
- reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
- bigTensorHandle.Connect(startLayer->GetInputSlot(1));
+ const armnn::TensorShape& smallShape = smallInfo.GetShape();
+ std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
+ for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
+ {
+ reshapedDimensions[i] = smallShape[i - sizeDifference];
+ }
+
+ armnn::TensorInfo reshapedInfo = smallInfo;
+ reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
+ reshapedDimensions.data() });
+ armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);
+
+ if (input0IsSmaller)
+ {
+ // Input0 is the "smaller" tensor; connect the reshape layer as follows:
+ //
+ // Input0 Input1
+ // | |
+ // Reshape |
+ // \ /
+ // StartLayer
+
+ reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+ input1.Connect(startLayer->GetInputSlot(1));
}
else
{
+ // Input1 is the "smaller" tensor; connect the reshape layer as follows:
+ //
+ // Input0 Input1
+ // | |
+ // | Reshape
+ // \ /
+ // StartLayer
+
input0.Connect(startLayer->GetInputSlot(0));
- input1.Connect(startLayer->GetInputSlot(1));
+ reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
}
}
@@ -402,23 +438,6 @@ bool RequiresReshape(armnn::TensorShape & inputShape)
return inputShape.GetNumDimensions() < 3;
}
-template<typename OSlot>
-armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
- armnn::TensorInfo reshapeInfo)
-{
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
-
- armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
- BOOST_ASSERT(reshapeLayer != nullptr);
-
- // Attach the input layer to the reshape layer
- inputLayer.Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
-
- return *reshapeLayer;
-}
-
void SwizzleInputs(armnn::INetwork& network,
std::vector<LayerInputHandle>& inputs,
std::vector<armnn::TensorShape>& inputShapes,
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index d412c086..79626614 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -49,6 +49,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
CONV_2D (FLOAT32,QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
+PRELU (FLOAT32,QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
--- Unsupported operators ---
@@ -72,7 +73,6 @@ MAXIMUM
PAD_V2
QUANTIZE
QUANTIZED_16BIT_LSTM
-PRELU
TRANSPOSE_CONV_2D
Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework
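A closing note on why the input order preserved by the updated BroadcastTensor matters here: unlike ADD or MUL, PReLU is not commutative, so the old behaviour of always wiring the reshaped ("smaller") tensor to input slot 0 could effectively swap the input and alpha operands. A hypothetical per-element reference, for illustration only (not driver code):

    // PReLU applies the slope only to negative input values, so the input and
    // alpha tensors cannot be exchanged without changing the result.
    float PreluElement(float x, float alpha)
    {
        return x >= 0.0f ? x : alpha * x;
    }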