author    Derek Lamberti <derek.lamberti@arm.com>  2019-04-15 18:37:35 +0100
committer derek.lamberti <derek.lamberti@arm.com>  2019-04-16 13:50:11 +0000
commit    0790dcea1056298d63f97dec904c8ade5d21f439 (patch)
tree      d75967e2eabe39ec08dc928fa77a7d4a51d85c5d /src/armnn
parent    b98bbcfa2a809c4ad025883c059ae49c82b37cbd (diff)
download  armnn-0790dcea1056298d63f97dec904c8ade5d21f439.tar.gz
IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic
+ Update clframework pin
+ Cl and Neon Merger workloads updated to use MemoryLayout agnostic API
+ Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors
+ Refactor LayerSupportCommon code to be a bit more succinct

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/LayerSupportCommon.hpp    49
-rw-r--r--  src/armnn/Tensor.cpp                14
-rw-r--r--  src/armnn/layers/MergerLayer.cpp    56
3 files changed, 79 insertions, 40 deletions
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 70b5f182f4..3e2a1241db 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -12,6 +12,15 @@
namespace armnn
{
+template<typename T, typename V>
+void SetValueChecked(Optional<T&> optionalRef, V&& val)
+{
+ if (optionalRef)
+ {
+ optionalRef.value() = val;
+ }
+}
+
template<typename Float16Func, typename Float32Func, typename Uint8Func, typename Int32Func, typename BooleanFunc,
typename ... Params>
bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
@@ -55,80 +64,56 @@ bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
template<typename ... Params>
bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float16 data type";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
return false;
}
template<typename ... Params>
bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float32 data type";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
return false;
}
template<typename ... Params>
bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with 8-bit data type";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
return false;
}
template<typename ... Params>
bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with int32 data type";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
return false;
}
template<typename ... Params>
bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float32 data type input";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
return false;
}
template<typename ... Params>
bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float16 data type input";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
return false;
}
template<typename ... Params>
bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float32 data type output";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
return false;
}
template<typename ... Params>
bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "Layer is not supported with float16 data type output";
- }
+ SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
return false;
}
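
The LayerSupportCommon change folds the repeated "if (reasonIfUnsupported) { ... }" blocks into the new SetValueChecked helper: the value is written only when the caller actually asked for a reason. A minimal, self-contained sketch of the pattern, using a simplified OptionalRef stand-in for armnn::Optional<T&> (the stand-in is illustrative only, not the real armnn class):

#include <iostream>
#include <string>

// Simplified stand-in for armnn::Optional<T&>: holds a possibly-null reference.
template<typename T>
class OptionalRef
{
public:
    OptionalRef() : m_Ptr(nullptr) {}
    OptionalRef(T& ref) : m_Ptr(&ref) {}
    explicit operator bool() const { return m_Ptr != nullptr; }
    T& value() { return *m_Ptr; }
private:
    T* m_Ptr;
};

// Same shape as the helper added in LayerSupportCommon.hpp:
// only assigns the value if the optional actually wraps a reference.
template<typename T, typename V>
void SetValueChecked(OptionalRef<T> optionalRef, V&& val)
{
    if (optionalRef)
    {
        optionalRef.value() = val;
    }
}

int main()
{
    std::string reason;
    SetValueChecked(OptionalRef<std::string>(reason),
                    "Layer is not supported with float16 data type");
    SetValueChecked(OptionalRef<std::string>(), "ignored: no reason requested");
    std::cout << reason << std::endl; // prints only the float16 message
}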
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index da19e5b97a..614abc77f5 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -188,6 +188,20 @@ unsigned int TensorInfo::GetNumBytes() const
return GetDataTypeSize(m_DataType) * GetNumElements();
}
+bool TensorInfo::IsTypeSpaceMatch(const TensorInfo& other) const
+{
+ bool match = true;
+
+ match &= m_DataType == other.m_DataType;
+
+ if (IsQuantized())
+ {
+ match &= GetQuantizationScale() == other.GetQuantizationScale() &&
+ GetQuantizationOffset() == other.GetQuantizationOffset();
+ }
+ return match;
+}
+
// ---
// --- BaseTensor
// ---
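
The new TensorInfo::IsTypeSpaceMatch treats two tensors as living in the same "type space" when their data types match and, for quantized tensors, their quantization scale and offset also match. A standalone sketch of the same check on a simplified struct (the field names here are hypothetical, not the real armnn::TensorInfo API):

#include <cassert>

// Simplified stand-in for armnn::TensorInfo, keeping only the fields
// that IsTypeSpaceMatch compares.
struct SimpleTensorInfo
{
    int   dataType;     // hypothetical encoding, e.g. 0 = Float32, 1 = quantized 8-bit
    bool  isQuantized;
    float quantScale;
    int   quantOffset;

    bool IsTypeSpaceMatch(const SimpleTensorInfo& other) const
    {
        bool match = dataType == other.dataType;
        if (isQuantized)
        {
            // Quantized tensors only share a type space if they also share
            // the same scale and zero-point.
            match = match && quantScale == other.quantScale
                          && quantOffset == other.quantOffset;
        }
        return match;
    }
};

int main()
{
    SimpleTensorInfo a{1, true, 0.5f, 10};
    SimpleTensorInfo b{1, true, 0.5f, 10};
    SimpleTensorInfo c{1, true, 0.25f, 10};
    assert(a.IsTypeSpaceMatch(b));   // same type, scale and offset
    assert(!a.IsTypeSpaceMatch(c));  // scale differs, so no match
}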
diff --git a/src/armnn/layers/MergerLayer.cpp b/src/armnn/layers/MergerLayer.cpp
index f87f34925f..c674f64f3f 100644
--- a/src/armnn/layers/MergerLayer.cpp
+++ b/src/armnn/layers/MergerLayer.cpp
@@ -36,14 +36,12 @@ std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const
void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
{
- //If sub tensors are supported than the merger
+ //If sub tensors are supported then the merger
//just needs to make sure that the outputs of the prev layer
//are made subtensors of the output of the merger layer.
m_OutputHandlers[0].CreateTensorHandles(factory);
- unsigned int innerAxis = m_Param.GetNumDimensions() - m_Param.GetConcatAxis();
-
- if (factory.SupportsSubTensors() && innerAxis != 1)
+ if (factory.SupportsSubTensors())
{
std::queue<MergerLayer*> m_MergerLayers;
@@ -52,23 +50,65 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
{
MergerLayer* currentLayer = m_MergerLayers.front();
ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
-
+ const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
m_MergerLayers.pop();
const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
+
+ // First go through all the input slots and verify that we can sub-tensor all the inputs.
+ std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
+ subTensors.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
{
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
+ const TensorInfo& info = slot->GetTensorInfo();
+
+ auto CreateSubTensor = [&]()
+ {
+ // Make sure quantization parameters are in the same space
+ if (parentInfo.IsTypeSpaceMatch(info))
+ {
+ return factory.CreateSubTensorHandle(*parentTensor,
+ info.GetShape(),
+ currentLayer->m_Param.GetViewOrigin(i));
+ }
+ return std::unique_ptr<ITensorHandle>();
+ };
+
+ auto subTensor = CreateSubTensor();
+ if (!subTensor)
+ {
+ break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs.
+ }
+ else
+ {
+ subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor.
+ }
+ }
+
+ // Ensure that ALL inputs can be substituted with valid sub-tensors
+ if (subTensors.size() < numInputSlots)
+ {
+ continue; // Don't optimize this Merge layer with sub-tensors
+ }
+
+ // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers.
+ unsigned int i=0;
+ for (auto& subTensor : subTensors)
+ {
+ OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- outputHandler.SetData(factory.CreateSubTensorHandle(*parentTensor,
- outputHandler.GetTensorInfo().GetShape(),
- currentLayer->m_Param.GetViewOrigin(i)));
+
+ BOOST_ASSERT_MSG(subTensor, "MergerLayer: Expected a valid sub-tensor for substitution.");
+ outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
if (inputLayer.GetType() == LayerType::Merger)
{
+ // Continue with the substitution if the connected inputs are also merger layers
m_MergerLayers.push(boost::polymorphic_downcast<MergerLayer*>(&inputLayer));
}
+ ++i;
}
}
}
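
The reworked MergerLayer::CreateTensorHandles is effectively two-pass: it first tries to create a sub-tensor for every input, and only performs the substitution if all of them succeed, otherwise the layer keeps its regular tensors. A standalone sketch of that all-or-nothing pattern (Handle and TryCreateSubTensor are hypothetical stand-ins for ITensorHandle and the workload factory, not the real API):

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-in for a tensor handle.
struct Handle { int id; };

// May fail (e.g. when quantization parameters do not match the parent tensor),
// mirroring CreateSubTensorHandle returning an empty handle.
std::unique_ptr<Handle> TryCreateSubTensor(int inputIndex, bool compatible)
{
    if (!compatible)
    {
        return nullptr;
    }
    return std::make_unique<Handle>(Handle{inputIndex});
}

// Returns sub-tensors for all inputs, or an empty vector if any input cannot
// be represented as a sub-tensor: the optimization is all-or-nothing.
std::vector<std::unique_ptr<Handle>> CreateAllOrNothing(const std::vector<bool>& compatible)
{
    std::vector<std::unique_ptr<Handle>> subTensors;
    subTensors.reserve(compatible.size());
    for (unsigned int i = 0; i < compatible.size(); ++i)
    {
        auto subTensor = TryCreateSubTensor(static_cast<int>(i), compatible[i]);
        if (!subTensor)
        {
            return {}; // one failure disables the optimization for this layer
        }
        subTensors.push_back(std::move(subTensor));
    }
    return subTensors;
}

int main()
{
    auto all  = CreateAllOrNothing({true, true, true});
    auto some = CreateAllOrNothing({true, false, true});
    std::cout << "all inputs sub-tensored: "   << all.size()  << "\n"; // 3
    std::cout << "mixed inputs sub-tensored: " << some.size() << "\n"; // 0
}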