author      Tracy Narine <tracy.narine@arm.com>    2023-09-20 14:19:07 +0100
committer   Tracy Narine <tracy.narine@arm.com>    2023-09-28 14:25:16 +0100
commit      6440ce89abb06e090d2b3cf91bafc14277072475 (patch)
tree        c55682891a0f01f3edbf5dad58720ded7af3fc64 /src/backends/backendsCommon
parent      9a418d850333119e219fb05addc57b56cdc60a7e (diff)
download    armnn-6440ce89abb06e090d2b3cf91bafc14277072475.tar.gz
IVGCVSW-7504 Create a backend specific optimization to fuse ADD+MUL+Add+(Activation) in CpuAcc
* Adding CpuAcc backend optimization to fuse add+mul+add into one layer
* Tests added/enhanced
* Also added optional extended parameter to Graph::Print() and throw macros that could be used in place of assert

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I5f8d094b969a130d8c2c7b4da07426313a9fea76
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/SubgraphUtils.hpp                      | 160
-rw-r--r--  src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp  |  18
2 files changed, 172 insertions, 6 deletions
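
For orientation, the pattern this commit teaches the CpuAcc backend to recognise is a chain of ElementwiseBinary layers with an optional trailing activation. Below is a minimal sketch of a network containing that chain, built with the public Arm NN API; the layer names, the ReLu choice, and the omitted second operands are illustrative, not taken from the commit:

#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>

using namespace armnn;

// Sketch: build the Add -> Mul -> Add -> (optional) Activation chain that
// the new CpuAcc optimization looks for.
INetworkPtr BuildAddMulAddNetwork()
{
    INetworkPtr network = INetwork::Create();

    ElementwiseBinaryDescriptor addDesc(BinaryOperation::Add);
    ElementwiseBinaryDescriptor mulDesc(BinaryOperation::Mul);

    IConnectableLayer* add0 = network->AddElementwiseBinaryLayer(addDesc, "add0");
    IConnectableLayer* mul  = network->AddElementwiseBinaryLayer(mulDesc, "mul");
    IConnectableLayer* add1 = network->AddElementwiseBinaryLayer(addDesc, "add1");

    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");

    // Feed each layer's output into input slot 0 of the next layer; the
    // second input of each binary layer would come from network inputs or
    // constants, omitted here for brevity.
    add0->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
    mul->GetOutputSlot(0).Connect(add1->GetInputSlot(0));
    add1->GetOutputSlot(0).Connect(relu->GetInputSlot(0));

    return network;
}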
diff --git a/src/backends/backendsCommon/SubgraphUtils.hpp b/src/backends/backendsCommon/SubgraphUtils.hpp
index 9f2cdba6ef..6a9e8f1b76 100644
--- a/src/backends/backendsCommon/SubgraphUtils.hpp
+++ b/src/backends/backendsCommon/SubgraphUtils.hpp
@@ -161,6 +161,53 @@ SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnecta
return result;
}
+// Type used to hold the slot numbers from which the input/output slot lists
+// are built. There must be one SlotList per layer in the layers vector.
+typedef std::vector<int> SlotList;
+
+template<typename ILayerType>
+SubgraphView::IInputSlots CreateIInputsFromSlotLists(const std::vector<ILayerType*>& layers,
+ const std::vector<SlotList>& layersSlotLists)
+{
+ ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
+
+ SubgraphView::IInputSlots result;
+
+ for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
+ {
+ const SlotList& slotList = layersSlotLists[layerIdx];
+ for (unsigned int slotIdx = 0; slotIdx < layers[layerIdx]->GetNumInputSlots(); ++slotIdx)
+ {
+ if (std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end())
+ {
+ result.push_back(&(layers[layerIdx]->GetInputSlot(slotIdx)));
+ }
+ }
+ }
+ return result;
+}
+
+template<typename ILayerType>
+SubgraphView::IOutputSlots CreateIOutputsFromSlotLists(const std::vector<ILayerType*>& layers,
+ const std::vector<SlotList>& layersSlotLists)
+{
+ ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
+
+ SubgraphView::IOutputSlots result;
+ for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
+ {
+ const SlotList& slotList = layersSlotLists[layerIdx];
+ for (unsigned int slotIdx = 0; slotIdx < layers[layerIdx]->GetNumOutputSlots(); ++slotIdx)
+ {
+ bool foundIt = std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end();
+ if (foundIt)
+ {
+ result.push_back(&(layers[layerIdx]->GetOutputSlot(slotIdx)));
+ }
+ }
+ }
+ return result;
+}
}
inline bool IsNCHW(armnn::Layer& layer)
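
A usage sketch for the two helpers above (a hypothetical call site, not code from this commit): each SlotList names the slots of the corresponding matched layer that face the rest of the graph, so they become the boundary of the replacement subgraph. The indices chosen here assume each binary layer takes its second operand from outside the fused pattern:

// addLayer0, mulLayer and addLayer1 are assumed to be the matched layers.
std::vector<Layer*> layers = { addLayer0, mulLayer, addLayer1 };

// Inputs entering the pattern: both inputs of the first Add, plus the
// second input of the Mul and of the second Add.
std::vector<SlotList> inputSlotLists  = { {0, 1}, {1}, {1} };

// Only the final Add's output leaves the pattern.
std::vector<SlotList> outputSlotLists = { {}, {}, {0} };

SubgraphView::IInputSlots  inputs  = CreateIInputsFromSlotLists<Layer>(layers, inputSlotLists);
SubgraphView::IOutputSlots outputs = CreateIOutputsFromSlotLists<Layer>(layers, outputSlotLists);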
@@ -308,4 +355,117 @@ LayerType* FoldPadIntoAveragePool2d(OptimizationViews& optimizationViews,
return replacementLayer;
}
+//
+// Layer sequence detection such as add + mul + add ( + optional activation )
+//
+
+inline bool IsSequenceLayerType(Layer& layer, LayerType type)
+{
+ return layer.GetType() == type;
+}
+
+inline bool IsSequenceLayerType(Layer& layer, BinaryOperation type)
+{
+ return (layer.GetType() == LayerType::ElementwiseBinary) &&
+ (PolymorphicDowncast<ElementwiseBinaryLayer*>(&layer)->GetParameters().m_Operation == type);
+}
+
+// Detect a layer sequence and activation if specified. The activation must be at the end of the sequence.
+template<typename TYPE>
+bool IsLayerSequence(Layer& currentLayer,
+ TYPE first,
+ TYPE second,
+ TYPE third,
+ Layer* layerList[4],
+ bool handleValidActivates,
+ const std::vector<ActivationFunction>& validActivates)
+{
+ auto PreviousLayer = [](Layer& layer)
+ {
+ return &layer.GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+ };
+
+ auto NextLayer = [](Layer& layer)
+ {
+ return &layer.GetOutputSlot(0).GetConnection(0)->GetOwningLayer();
+ };
+
+ auto LayerIncomingConnectionDataType = [](Layer& layer)
+ {
+ return layer.GetInputSlot(0).GetTensorInfo().GetDataType();
+ };
+
+ bool result = false;
+
+ // Match in reverse so there is only 1 connection to check
+ if (IsSequenceLayerType(currentLayer, third))
+ {
+ // Save DataType of third layer
+ DataType dataType = LayerIncomingConnectionDataType(currentLayer);
+
+ // Save third layer
+ layerList[2] = &currentLayer;
+
+ // Check the layers that precede this one for the requested grouping
+ Layer *prevLayer = PreviousLayer(currentLayer);
+ if (prevLayer && IsSequenceLayerType(*prevLayer, second))
+ {
+ bool dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
+ if (! dataTypesMatch)
+ {
+ return result;
+ }
+
+ layerList[1] = prevLayer;
+ prevLayer = PreviousLayer(*prevLayer);
+ if (prevLayer && IsSequenceLayerType(*prevLayer, first))
+ {
+ dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
+ if (! dataTypesMatch)
+ {
+ return result;
+ }
+
+ layerList[0] = prevLayer;
+
+ // If we get to this point, the first three layers have been detected. Now
+ // check for a valid activation at the end of the sequence; if there is no
+ // activation at all, the sequence still matches.
+ if (handleValidActivates)
+ {
+ Layer *nextLayer = NextLayer(currentLayer);
+ if (nextLayer)
+ {
+ if (IsSequenceLayerType(*nextLayer, LayerType::Activation))
+ {
+ // This layer is an activation, so it must be a valid type for the sequence
+ ActivationFunction activationFunction =
+ PolymorphicDowncast<ActivationLayer*>(nextLayer)->GetParameters().m_Function;
+ long count = std::count(validActivates.cbegin(),
+ validActivates.cend(),
+ activationFunction);
+ if (count > 0)
+ {
+ layerList[3] = nextLayer;
+ result = true;
+ }
+ }
+ else
+ {
+ // Next layer is not an activation so sequence still matches
+ result = true;
+ }
+ }
+ }
+ else
+ {
+ result = true;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
} // namespace armnn
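
To illustrate how a backend pass might drive IsLayerSequence (a sketch under assumed plumbing; the wrapper name and the accepted activation list are not from this diff):

// Sketch: probe one candidate layer for the Add -> Mul -> Add (+ activation) chain.
bool MatchAddMulAdd(armnn::Layer& candidateLastAdd, armnn::Layer* (&matched)[4])
{
    using namespace armnn;

    // Trailing activations accepted by the fused kernel; the exact list
    // here is an assumption.
    const std::vector<ActivationFunction> validActivates =
        { ActivationFunction::ReLu, ActivationFunction::BoundedReLu };

    // Match backwards from the final Add through Mul and Add, then forwards
    // to an optional activation of an accepted type.
    return IsLayerSequence<BinaryOperation>(candidateLastAdd,
                                            BinaryOperation::Add,
                                            BinaryOperation::Mul,
                                            BinaryOperation::Add,
                                            matched,
                                            true,
                                            validActivates);
}

On success, matched[0..2] hold the first Add, the Mul and the final Add; matched[3] is set only when an accepted activation follows, so the caller should initialise the array to nullptr before the call.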
diff --git a/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
index 9dece9be3b..39d2219954 100644
--- a/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/AddMulAddTestImpl.hpp
@@ -39,18 +39,18 @@ std::vector<LayerTestResult<T,4>> AddMulAddTest(armnn::IWorkloadFactory& workloa
if (IsQuantizedType<T>())
{
input0TensorInfo.SetQuantizationScale(0.25f);
- input0TensorInfo.SetQuantizationOffset(128);
+ input0TensorInfo.SetQuantizationOffset(10);
input1TensorInfo.SetQuantizationScale(0.25f);
- input1TensorInfo.SetQuantizationOffset(128);
+ input1TensorInfo.SetQuantizationOffset(11);
mulInput1TensorInfo.SetQuantizationScale(0.25f);
- mulInput1TensorInfo.SetQuantizationOffset(128);
+ mulInput1TensorInfo.SetQuantizationOffset(12);
addInput1TensorInfo.SetQuantizationScale(0.25f);
- addInput1TensorInfo.SetQuantizationOffset(128);
+ addInput1TensorInfo.SetQuantizationOffset(13);
output0TensorInfo.SetQuantizationScale(0.5f);
- output0TensorInfo.SetQuantizationOffset(120);
+ output0TensorInfo.SetQuantizationOffset(14);
output1TensorInfo.SetQuantizationScale(0.5f);
- output1TensorInfo.SetQuantizationOffset(120);
+ output1TensorInfo.SetQuantizationOffset(15);
}
std::vector<float> input0Data
@@ -140,6 +140,12 @@ std::vector<LayerTestResult<T,4>> AddMulAddTest(armnn::IWorkloadFactory& workloa
}
AddOutputToWorkload(fusedQueueDescriptor, info, output1TensorInfo, output1Handle.get());
+ if (addOutput)
+ {
+ AddOutputToWorkload(fusedQueueDescriptor, info, output0TensorInfo, output0Handle.get());
+ }
+ AddOutputToWorkload(fusedQueueDescriptor, info, output1TensorInfo, output1Handle.get());
+
std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Fused,
fusedQueueDescriptor,
info);