Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/CMakeLists.txt                   |   1
-rw-r--r--  src/backends/neon/NeonBackend.cpp                  |  82
-rw-r--r--  src/backends/neon/NeonBackendOptimizationUtils.hpp | 215
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp          |   2
4 files changed, 300 insertions(+), 0 deletions(-)
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index 1c077731c4..8ceeef386b 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -8,6 +8,7 @@ if(ARMCOMPUTENEON)
NeonBackend.cpp
NeonBackend.hpp
NeonBackendId.hpp
+ NeonBackendOptimizationUtils.hpp
NeonBackendModelContext.hpp
NeonBackendModelContext.cpp
NeonInterceptorScheduler.hpp
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index b5719db007..7311098631 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -9,6 +9,7 @@
#include "NeonWorkloadFactory.hpp"
#include "NeonLayerSupport.hpp"
#include "NeonTensorHandleFactory.hpp"
+#include "NeonBackendOptimizationUtils.hpp"
#include <armnn/BackendRegistry.hpp>
#include <armnn/Descriptors.hpp>
@@ -28,6 +29,7 @@
#include <neon/workloads/NeonDepthwiseConvolutionWorkload.hpp>
#include <neon/workloads/NeonDivisionWorkload.hpp>
#include <neon/workloads/NeonFullyConnectedWorkload.hpp>
+#include <neon/workloads/NeonFusedWorkload.hpp>
#include <neon/workloads/NeonMultiplicationWorkload.hpp>
#include <neon/workloads/NeonReduceWorkload.hpp>
#include <neon/workloads/NeonSubtractionWorkload.hpp>
@@ -524,6 +526,86 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
}
RemoveReshapeLayer(baseLayer, untouched, optimizationViews);
}
+
+ // Replace Add/Mul/Add where possible
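+ // An Add -> Mul -> Add sequence, optionally followed by a ReLu or BoundedReLu
+ // activation, can be collapsed into a single Fused layer running the AddMulAdd
+ // kernel, provided NeonFusedWorkloadValidate accepts the gathered tensor infos.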
+ Layer* layerList[4] = {nullptr, nullptr, nullptr, nullptr};
+ const std::vector<ActivationFunction> validActivates = { ActivationFunction::ReLu,
+ ActivationFunction::BoundedReLu };
+ if (IsLayerSequence<BinaryOperation>(base,
+ BinaryOperation::Add, BinaryOperation::Mul, BinaryOperation::Add,
+ layerList,
+ true, // handleValidActivates
+ validActivates))
+ {
+ bool fuseReLu = false;
+ unsigned int numInputs = 0;
+ unsigned int numOutputs = 0;
+ std::vector<TensorInfo> inputInfos;
+ std::vector<TensorInfo> outputInfos;
+ const ActivationDescriptor* activationDescriptor = nullptr;
+
+ if (BuildAddMulAddTensorInfoLists<Layer>(layerList,
+ numInputs,
+ numOutputs,
+ inputInfos,
+ outputInfos,
+ activationDescriptor,
+ fuseReLu))
+ {
+ // Create the new Add/Mul/Add layer and set the Relu activation function
+ FusedDescriptor fusedDescriptor(numInputs, numOutputs, FusedKernelType::AddMulAdd);
+ arm_compute::Status status = NeonFusedWorkloadValidate({inputInfos.begin(), inputInfos.end()},
+ {outputInfos.begin(), outputInfos.end()},
+ fusedDescriptor,
+ activationDescriptor);
+ if (status)
+ {
+ std::string fusedName;
+ GetFusedName(layerList, fusedName);
+
+ IConnectableLayer* addMulAddLayer =
+ optimizationViews.GetINetwork()->AddFusedLayer(fusedDescriptor, fusedName.c_str());
+
+ if (fuseReLu)
+ {
+ FusedLayer* addMulAddFusedLayer = PolymorphicDowncast<FusedLayer*>(addMulAddLayer);
+ addMulAddFusedLayer->SetAdditionalInfoForObject(
+ std::make_shared<ActivationDescriptor>(*activationDescriptor));
+ }
+
+ // Update the graph
+ std::vector<IConnectableLayer*> originalLayers;
+ for (unsigned int layerIdx = 0; layerIdx < 4; ++layerIdx)
+ {
+ if (layerList[layerIdx])
+ {
+ originalLayers.push_back(layerList[layerIdx]);
+ }
+ }
+
+ std::vector<SlotList> inputLayersSlotLists, outputLayersSlotLists;
+ BuildAddMulAddSlotLists<SlotList>(fuseReLu,
+ outputInfos.size() > 1,
+ inputLayersSlotLists,
+ outputLayersSlotLists);
+
+ ReplaceMultipleLayers<FusedLayer>(optimizationViews,
+ originalLayers,
+ PolymorphicDowncast<FusedLayer*>(addMulAddLayer),
+ inputLayersSlotLists,
+ outputLayersSlotLists);
+
+ // Remove unused layers
+ for (unsigned int layerIdx = 0; layerIdx < 4; ++layerIdx)
+ {
+ if (layerList[layerIdx])
+ {
+ untouched.erase(layerList[layerIdx]->GetGuid());
+ }
+ }
+ }
+ }
+ }
}
if (optimizationViews.GetSubstitutions().empty() && optimizationViews.GetDeletedSubgraphs().empty())
diff --git a/src/backends/neon/NeonBackendOptimizationUtils.hpp b/src/backends/neon/NeonBackendOptimizationUtils.hpp
new file mode 100644
index 0000000000..3a8bf46599
--- /dev/null
+++ b/src/backends/neon/NeonBackendOptimizationUtils.hpp
@@ -0,0 +1,215 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <aclCommon/ArmComputeSubgraphUtils.hpp>
+
+namespace armnn
+{
+
+// Changes shapes of the form [1, 1, ..., W] to [ W ]
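+// e.g. [1, 1, 1, 2] becomes [2]; returns false if any leading dimension is not 1.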
+inline bool CollapseLeadingUnitDimensions(const TensorInfo& in, TensorInfo& out)
+{
+ unsigned int numDimensions = in.GetNumDimensions();
+ for (unsigned int i = 0; i < (numDimensions-1); ++i)
+ {
+ if (in.GetShape()[i] != 1)
+ {
+ return false;
+ }
+ }
+
+ unsigned int w = in.GetShape()[numDimensions-1];
+ out = in;
+ out.SetShape({w});
+
+ return true;
+}
+
+//
+// Build slot and tensor info lists for Add/Mul/Add replacement
+//
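+// Each list holds, per original layer, the indices of the input/output slots whose
+// connections are external to the fused group and therefore need rewiring onto the
+// new Fused layer.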
+template<typename SlotListType>
+void BuildAddMulAddSlotLists(bool handleReLu,
+ bool multipleOutputs,
+ std::vector<SlotListType>& inputLayersSlotLists,
+ std::vector<SlotListType>& outputLayersSlotLists)
+{
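+ // Example (handleReLu == true, multipleOutputs == true):
+ //   inputLayersSlotLists  = { {0, 1}, {1}, {1}, {} }   for Add, Mul, Add, Relu
+ //   outputLayersSlotLists = { {0}, {}, {}, {0} }        for Add, Mul, Add, Relu
+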
+ // Build input slot list
+ inputLayersSlotLists.push_back({0, 1}); // Add
+ inputLayersSlotLists.push_back({1}); // Mul
+ inputLayersSlotLists.push_back({1}); // Add
+ if (handleReLu)
+ {
+ inputLayersSlotLists.push_back({}); // Relu
+ }
+
+ // Build output slot list
+ if (multipleOutputs)
+ {
+ outputLayersSlotLists.push_back({0}); // Add
+ }
+ else
+ {
+ outputLayersSlotLists.push_back({}); // Add
+ }
+ outputLayersSlotLists.push_back({}); // Mul
+ if (handleReLu)
+ {
+ outputLayersSlotLists.push_back({}); // Add
+ outputLayersSlotLists.push_back({0}); // Relu
+ }
+ else
+ {
+ outputLayersSlotLists.push_back({0}); // Add
+ }
+}
+
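+// Builds a name of the form "fused-<name0>-<name1>-..." from the names of the layers
+// being replaced, e.g. "fused-add0-mul0-add1-relu0" (illustrative layer names).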
+inline void GetFusedName(Layer *layerList[4], std::string& fusedName)
+{
+ // Build the fused name string
+ fusedName = "fused";
+ for (unsigned int layerIdx = 0; layerIdx < 4; ++layerIdx)
+ {
+ if (! layerList[layerIdx])
+ {
+ break;
+ }
+ fusedName += "-";
+ fusedName += layerList[layerIdx]->GetNameStr();
+ }
+}
+
+template<typename Type>
+bool BuildAddMulAddTensorInfoLists(Type* layerList[4],
+ unsigned int& numInputs,
+ unsigned int& numOutputs,
+ std::vector<TensorInfo>& inputInfos,
+ std::vector<TensorInfo>& outputInfos,
+ const ActivationDescriptor*& activationDescriptor,
+ bool& fuseReLu)
+{
+ ARMNN_THROW_INVALIDARG_IF_FALSE(layerList[0]);
+ ARMNN_THROW_INVALIDARG_IF_FALSE(layerList[1]);
+ ARMNN_THROW_INVALIDARG_IF_FALSE(layerList[2]);
+
+ ARMNN_THROW_INVALIDARG_IF_FALSE(IsSequenceLayerType(*layerList[0], BinaryOperation::Add));
+ ARMNN_THROW_INVALIDARG_IF_FALSE(IsSequenceLayerType(*layerList[1], BinaryOperation::Mul));
+ ARMNN_THROW_INVALIDARG_IF_FALSE(IsSequenceLayerType(*layerList[2], BinaryOperation::Add));
+
+ fuseReLu = (layerList[3] != nullptr);
+ if (fuseReLu)
+ {
+ activationDescriptor = &PolymorphicDowncast<ActivationLayer *>(layerList[3])->GetParameters();
+ ARMNN_THROW_INVALIDARG_IF_FALSE((activationDescriptor->m_Function == ActivationFunction::ReLu) ||
+ (activationDescriptor->m_Function == ActivationFunction::BoundedReLu));
+ }
+
+ numInputs = 0;
+ numOutputs = 0;
+
+ // Ensure that there are 6 input slots in the add/mul/add layers
+ // we are going to replace
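+ // The three binary layers expose two input slots each (six in total); two of those
+ // connections are the internal Add->Mul and Mul->Add links, so exactly four external
+ // inputs must remain for the fused layer.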
+ unsigned int layerIdx = 0;
+ unsigned int inputSlotCount = 0;
+ for (layerIdx = 0; layerIdx < 3; ++layerIdx)
+ {
+ for (unsigned int slotIdx = 0; slotIdx < layerList[layerIdx]->GetNumInputSlots(); ++slotIdx)
+ {
+ InputSlot* inputSlot = &layerList[layerIdx]->GetInputSlot(slotIdx);
+ OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
+ if (outputSlot)
+ {
+ if (layerIdx == 0)
+ {
+ // Always count the input connections of the first add
+ inputInfos.push_back(inputSlot->GetTensorInfo());
+ numInputs++;
+ }
+ else
+ {
+ // For subsequent layers, we skip connections to the previous layers in the counting
+ if (&outputSlot->GetOwningLayer() != layerList[layerIdx-1])
+ {
+ TensorInfo inputSlotInfo = inputSlot->GetTensorInfo();
+ if (numInputs == 2 || numInputs == 3)
+ {
+ // Workaround the broadcast optimization to collapse shapes such as
+ // [1, 1, 1, 2] to [2] as required by backend
+ if (CollapseLeadingUnitDimensions(inputSlot->GetTensorInfo(), inputSlotInfo))
+ {
+ OutputSlot* previousLayerSlot = inputSlot->GetConnectedOutputSlot();
+ if (previousLayerSlot)
+ {
+ if (previousLayerSlot->GetOwningLayer().GetType() == LayerType::Constant)
+ {
+ // First update the TensorInfo in the constant owning layer
+ previousLayerSlot->SetTensorInfo(inputSlotInfo);
+ // Then update the TensorInfo in the workload for the owning layer
+ ConstantLayer* layer = PolymorphicDowncast<ConstantLayer*>(
+ &previousLayerSlot->GetOwningLayer());
+ layer->m_LayerOutput
+ = std::make_unique<ScopedTensorHandle>(
+ ConstTensor(inputSlotInfo,
+ layer->m_LayerOutput.get()->GetConstTensor<void>()));
+ }
+ }
+ }
+ }
+ inputInfos.push_back(inputSlotInfo);
+ numInputs++;
+ }
+ }
+ inputSlotCount++;
+ }
+ }
+ }
+
+ // Check the input counts
+ bool validInputCount = (inputSlotCount == 6) && (inputInfos.size() == 4);
+ if (! validInputCount)
+ {
+ return false;
+ }
+
+ const unsigned int maxIdx = (fuseReLu) ? 4 : 3;
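+ // Collect only connections that leave the fused group: for all but the last layer,
+ // skip the internal connection to the next layer in the chain; every connection of
+ // the last layer's outputs becomes an output of the fused layer.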
+ for (layerIdx = 0; layerIdx < maxIdx; ++layerIdx)
+ {
+ for (unsigned int slotIdx = 0; slotIdx < layerList[layerIdx]->GetNumOutputSlots(); ++slotIdx)
+ {
+ OutputSlot* outputSlot = &layerList[layerIdx]->GetOutputSlot(slotIdx);
+
+ for (unsigned int connectionIdx = 0; connectionIdx < outputSlot->GetNumConnections(); ++connectionIdx)
+ {
+ InputSlot* inputSlot = outputSlot->GetConnection(connectionIdx);
+ if (layerIdx < (maxIdx-1))
+ {
+ if (&inputSlot->GetOwningLayer() != layerList[layerIdx+1])
+ {
+ outputInfos.push_back(outputSlot->GetTensorInfo());
+ numOutputs++;
+ }
+ }
+ else if (layerList[layerIdx] != nullptr)
+ {
+ outputInfos.push_back(outputSlot->GetTensorInfo());
+ numOutputs++;
+ }
+ }
+ }
+ }
+
+ // Check the output count
+ bool validOutputCount = (outputInfos.size() > 0);
+ if (! validOutputCount)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index c9dd1ff507..658d718b19 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1726,9 +1726,11 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Te
// AddMulAdd
ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd2OutputsFloat32, AddMulAddTest<DataType::Float32>, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd2OutputsInt8, AddMulAddTest<DataType::QAsymmS8>, true)
ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd2OutputsUint8, AddMulAddTest<DataType::QAsymmU8>, true)
ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd1OutputFloat32, AddMulAddTest<DataType::Float32>, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd1OutputInt8, AddMulAddTest<DataType::QAsymmS8>, false)
ARMNN_AUTO_TEST_CASE_WITH_THF(AddMulAdd1OutputUint8, AddMulAddTest<DataType::QAsymmU8>, false)
#if defined(ARMNNREF_ENABLED)