aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Monahan <david.monahan@arm.com>2023-04-27 15:21:19 +0100
committerDavid Monahan <david.monahan@arm.com>2023-04-28 14:24:51 +0100
commit6c53f9fbea7d0b8786e1d29b850ab7bed85e167a (patch)
treec2b20f3e6b2a9713e2f79f8deebea4b2a73f0286
parent48ec813697c7d431a9159e5759fb31a41739fb10 (diff)
downloadarmnn-6c53f9fbea7d0b8786e1d29b850ab7bed85e167a.tar.gz
IVGCVSW-7588 Implement ElementWiseBinary Op for Opaque Delegate
* Added visit functions for ElementwiseBinary Ops
* Moved MultiLayerFacade.hpp to common directory and updated both delegates to use it

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I84b8bd74d15a194895e63da47c29be994531a889
-rw-r--r--delegate/CMakeLists.txt3
-rw-r--r--delegate/classic/CMakeLists.txt1
-rw-r--r--delegate/classic/src/MultiLayerFacade.hpp136
-rw-r--r--delegate/common/src/MultiLayerFacade.hpp136
-rw-r--r--delegate/opaque/CMakeLists.txt3
-rw-r--r--delegate/opaque/src/ElementwiseBinary.hpp411
-rw-r--r--delegate/opaque/src/MultiLayerFacade.hpp4
-rw-r--r--delegate/opaque/src/armnn_delegate.cpp42
8 files changed, 593 insertions, 143 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 8045ee8e1d..a77e630da0 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -21,7 +21,8 @@ set(armnnDelegate_sources)
list(APPEND armnnDelegate_sources
common/include/DelegateOptions.hpp
common/src/DelegateOptions.cpp
- common/src/DelegateUtils.hpp)
+ common/src/DelegateUtils.hpp
+ common/src/MultiLayerFacade.hpp)
## Add Armnn as a Dependency
if(NOT ARMNN_SUB_PROJECT)
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index 367ac40790..54e00e1c9f 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -25,7 +25,6 @@ list(APPEND armnnClassicDelegateObject_sources
src/GatherNd.hpp
src/LogicalBinary.hpp
src/Lstm.hpp
- src/MultiLayerFacade.hpp
src/Normalization.hpp
src/Pack.hpp
src/Pad.hpp
diff --git a/delegate/classic/src/MultiLayerFacade.hpp b/delegate/classic/src/MultiLayerFacade.hpp
deleted file mode 100644
index 90d0b3174d..0000000000
--- a/delegate/classic/src/MultiLayerFacade.hpp
+++ /dev/null
@@ -1,136 +0,0 @@
-//
-// Copyright © 2021,2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-// NOTE: the MultiLayerFacade class is a utility class which makes a chain
-// of operators look like a single IConnectableLayer with the first
-// layer in the chain supplying the input slots and the last supplying
-// the output slots. It enables us, for example, to simulate a
-// Tensorflow Lite FloorDiv operator by chaining a Div layer followed
-// by a Floor layer and pass them as a single unit to the code that
-// connects up the graph as the delegate proceeds to build up the
-// Arm NN subgraphs.
-//
-
-#include <common/include/ProfilingGuid.hpp>
-#include <armnn/INetwork.hpp>
-
-namespace armnnDelegate
-{
-
-class MultiLayerFacade : public armnn::IConnectableLayer
-{
-public:
- MultiLayerFacade() :
- m_FirstLayer(nullptr), m_LastLayer(nullptr) {}
-
- MultiLayerFacade(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer) :
- m_FirstLayer(firstLayer), m_LastLayer(lastLayer) {}
-
- MultiLayerFacade(const MultiLayerFacade& obj) :
- m_FirstLayer(obj.m_FirstLayer), m_LastLayer(obj.m_LastLayer) {}
-
- ~MultiLayerFacade() {} // we don't own the pointers
-
- MultiLayerFacade& operator=(const MultiLayerFacade& obj)
- {
- m_FirstLayer = obj.m_FirstLayer;
- m_LastLayer = obj.m_LastLayer;
- return *this;
- }
-
- void AssignValues(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer)
- {
- m_FirstLayer = firstLayer;
- m_LastLayer = lastLayer;
- }
-
- virtual const char* GetName() const override
- {
- return m_FirstLayer->GetName();
- }
-
- virtual unsigned int GetNumInputSlots() const override
- {
- return m_FirstLayer->GetNumInputSlots();
- }
-
- virtual unsigned int GetNumOutputSlots() const override
- {
- return m_LastLayer->GetNumOutputSlots();
- }
-
- virtual const armnn::IInputSlot& GetInputSlot(unsigned int index) const override
- {
- return m_FirstLayer->GetInputSlot(index);
- }
-
- virtual armnn::IInputSlot& GetInputSlot(unsigned int index) override
- {
- return m_FirstLayer->GetInputSlot(index);
- }
-
- virtual const armnn::IOutputSlot& GetOutputSlot(unsigned int index) const override
- {
- return m_LastLayer->GetOutputSlot(index);
- }
-
- virtual armnn::IOutputSlot& GetOutputSlot(unsigned int index) override
- {
- return m_LastLayer->GetOutputSlot(index);
- }
-
- virtual std::vector<armnn::TensorShape> InferOutputShapes(
- const std::vector<armnn::TensorShape>& inputShapes) const override
- {
- // NOTE: do not expect this function to be used. Likely that if it is it might need to be overridden
- // for particular sequences of operators.
- return m_FirstLayer->InferOutputShapes(inputShapes);
- }
-
- virtual LayerGuid GetGuid() const override
- {
- return m_FirstLayer->GetGuid();
- }
-
- virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
- {
- // Do not expect this function to be used so not providing an implementation
- // if an implementation is required and the chain contains more than two operators
- // would have to provide a way to record the intermediate layers so they could be
- // visited... the same applies to the BackendSelectionHint
- // below.
- }
-
- virtual void BackendSelectionHint(armnn::Optional<armnn::BackendId> backend) override
- {
- // Do not expect this function to be used so not providing an implementation
- }
-
- virtual armnn::LayerType GetType() const override
- {
- return m_FirstLayer->GetType();
- }
-
- virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
-
- void SetBackendId(const armnn::BackendId& id) override {}
-
-protected:
- /// Retrieve the handles to the constant values stored by the layer.
- /// @return A vector of the constant tensors stored by this layer.
- ConstantTensors GetConstantTensorsByRef() override { return {}; }
- ImmutableConstantTensors GetConstantTensorsByRef() const override { return {}; }
-
-private:
- armnn::IConnectableLayer* m_FirstLayer;
- armnn::IConnectableLayer* m_LastLayer;
-
- // to satisfy the GetParameters method need to hand back a NullDescriptor
- armnn::NullDescriptor m_NullDescriptor;
-};
-
-} // namespace armnnDelegate
diff --git a/delegate/common/src/MultiLayerFacade.hpp b/delegate/common/src/MultiLayerFacade.hpp
new file mode 100644
index 0000000000..20ccd35f22
--- /dev/null
+++ b/delegate/common/src/MultiLayerFacade.hpp
@@ -0,0 +1,136 @@
+//
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+// NOTE: the MultiLayerFacade class is a utility class which makes a chain
+// of operators look like a single IConnectableLayer with the first
+// layer in the chain supplying the input slots and the last supplying
+// the output slots. It enables us, for example, to simulate a
+// Tensorflow Lite FloorDiv operator by chaining a Div layer followed
+// by a Floor layer and pass them as a single unit to the code that
+// connects up the graph as the delegate proceeds to build up the
+// Arm NN subgraphs.
+//
+
+#include <common/include/ProfilingGuid.hpp>
+#include <armnn/INetwork.hpp>
+
+namespace armnnDelegate
+{
+
+ class MultiLayerFacade : public armnn::IConnectableLayer
+ {
+ public:
+ MultiLayerFacade() :
+ m_FirstLayer(nullptr), m_LastLayer(nullptr) {}
+
+ MultiLayerFacade(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer) :
+ m_FirstLayer(firstLayer), m_LastLayer(lastLayer) {}
+
+ MultiLayerFacade(const MultiLayerFacade& obj) :
+ m_FirstLayer(obj.m_FirstLayer), m_LastLayer(obj.m_LastLayer) {}
+
+ ~MultiLayerFacade() {} // we don't own the pointers
+
+ MultiLayerFacade& operator=(const MultiLayerFacade& obj)
+ {
+ m_FirstLayer = obj.m_FirstLayer;
+ m_LastLayer = obj.m_LastLayer;
+ return *this;
+ }
+
+ void AssignValues(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer)
+ {
+ m_FirstLayer = firstLayer;
+ m_LastLayer = lastLayer;
+ }
+
+ virtual const char* GetName() const override
+ {
+ return m_FirstLayer->GetName();
+ }
+
+ virtual unsigned int GetNumInputSlots() const override
+ {
+ return m_FirstLayer->GetNumInputSlots();
+ }
+
+ virtual unsigned int GetNumOutputSlots() const override
+ {
+ return m_LastLayer->GetNumOutputSlots();
+ }
+
+ virtual const armnn::IInputSlot& GetInputSlot(unsigned int index) const override
+ {
+ return m_FirstLayer->GetInputSlot(index);
+ }
+
+ virtual armnn::IInputSlot& GetInputSlot(unsigned int index) override
+ {
+ return m_FirstLayer->GetInputSlot(index);
+ }
+
+ virtual const armnn::IOutputSlot& GetOutputSlot(unsigned int index) const override
+ {
+ return m_LastLayer->GetOutputSlot(index);
+ }
+
+ virtual armnn::IOutputSlot& GetOutputSlot(unsigned int index) override
+ {
+ return m_LastLayer->GetOutputSlot(index);
+ }
+
+ virtual std::vector<armnn::TensorShape> InferOutputShapes(
+ const std::vector<armnn::TensorShape>& inputShapes) const override
+ {
+ // NOTE: do not expect this function to be used. Likely that if it is it might need to be overridden
+ // for particular sequences of operators.
+ return m_FirstLayer->InferOutputShapes(inputShapes);
+ }
+
+ virtual LayerGuid GetGuid() const override
+ {
+ return m_FirstLayer->GetGuid();
+ }
+
+ virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
+ {
+ // Do not expect this function to be used so not providing an implementation
+ // if an implementation is required and the chain contains more than two operators
+ // would have to provide a way to record the intermediate layers so they could be
+ // visited... the same applies to the BackendSelectionHint
+ // below.
+ }
+
+ virtual void BackendSelectionHint(armnn::Optional<armnn::BackendId> backend) override
+ {
+ // Do not expect this function to be used so not providing an implementation
+ }
+
+ virtual armnn::LayerType GetType() const override
+ {
+ return m_FirstLayer->GetType();
+ }
+
+ virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
+
+ void SetBackendId(const armnn::BackendId& id) override {}
+
+ protected:
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
+ ConstantTensors GetConstantTensorsByRef() override { return {}; }
+ ImmutableConstantTensors GetConstantTensorsByRef() const override { return {}; }
+
+ private:
+ armnn::IConnectableLayer* m_FirstLayer;
+ armnn::IConnectableLayer* m_LastLayer;
+
+ // to satisfy the GetParameters method need to hand back a NullDescriptor
+ armnn::NullDescriptor m_NullDescriptor;
+ };
+
+} // namespace armnnDelegate
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index f1c8851396..716bac6b53 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -15,6 +15,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
src/Comparison.hpp
src/Control.hpp
src/Convolution.hpp
+ src/ElementwiseBinary.hpp
src/ElementwiseUnary.hpp
src/FullyConnected.hpp
src/Gather.hpp
@@ -83,4 +84,4 @@ target_link_libraries(armnnOpaqueDelegateObject PUBLIC flatbuffer_headers_opaque
## Export targets
install(TARGETS armnnOpaqueDelegateObject
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
- RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
\ No newline at end of file
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
diff --git a/delegate/opaque/src/ElementwiseBinary.hpp b/delegate/opaque/src/ElementwiseBinary.hpp
index e16969768e..d6a0947b96 100644
--- a/delegate/opaque/src/ElementwiseBinary.hpp
+++ b/delegate/opaque/src/ElementwiseBinary.hpp
@@ -2,3 +2,414 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+#include <MultiLayerFacade.hpp>
+
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ std::vector<armnn::TensorInfo> infos { inputInfo1, inputInfo2, outputInfo };
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ADD",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputInfo,
+ armnn::BinaryOperation::Add);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+
+TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DIV",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Div);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ // need first to validate that the div operator is supported
+ // then that the floor operator is supported
+ TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
+ if (status != kTfLiteOk)
+ {
+ return status;
+ }
+ // if the inputs and output of the div are all Signed32 we don't need to add the floor operator afterward.
+ if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
+ {
+ return status;
+ }
+ // in case broadcasting is being done from one of the inputs to the div
+ // choose the full sized input tensor to pass to the floor validation routine
+ armnn::TensorInfo floorInputInfo = inputInfo1;
+ if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
+ {
+ floorInputInfo = inputInfo2;
+ }
+ status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
+ return status;
+}
+
+TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MAXIMUM",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Maximum);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MINIMUM",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Minimum);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MUL",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Mul);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SUB",
+ tfLiteContext,
+ IsElementwiseBinarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo1,
+ inputInfo2,
+ outputTensorInfo,
+ armnn::BinaryOperation::Sub);
+ };
+
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
+ DelegateData& delegateData,
+ const armnn::TensorInfo& outputTensorInfo)
+{
+ armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Div);
+ // if the output of the div is Signed32 the Floor layer is not required
+ if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
+ {
+ return std::make_pair(divisionLayer, divisionLayer);
+ }
+ armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+ armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+ outputSlot.Connect(floorLayer->GetInputSlot(0));
+ return std::make_pair(divisionLayer, floorLayer);
+}
+
+TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t elementwiseBinaryOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get Input Tensors
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteInputTensor0 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, elementwiseBinaryOperatorCode, nodeIndex))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid input tensor in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ // Use input indices to get filter tensor.
+ const TfLiteOpaqueTensor* tfLiteInputTensor1 = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if(!IsValid(tfLiteInputTensor1))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid input tensor in operator #%d node #%d: ",
+ elementwiseBinaryOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, elementwiseBinaryOperatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor0);
+ armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor1);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+
+
+ // Check if we need to expand the dims of the input tensor infos.
+ // This is required for a few of the backends.
+ if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+ {
+ ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+ }
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ if (!delegateData.m_Network)
+ {
+ switch(elementwiseBinaryOperatorCode)
+ {
+ case kTfLiteBuiltinAdd:
+ return ValidateAddOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinDiv:
+ return ValidateDivOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinFloorDiv:
+ return ValidateFloorDivOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMaximum:
+ return ValidateMaximumOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMinimum:
+ return ValidateMinimumOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinMul:
+ return ValidateMulOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ case kTfLiteBuiltinSub:
+ return ValidateSubOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo);
+ default:
+ return kTfLiteError;
+ }
+ }
+
+ armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
+ armnnDelegate::MultiLayerFacade multiLayer;
+ switch(elementwiseBinaryOperatorCode)
+ {
+ case kTfLiteBuiltinAdd:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Add);
+ break;
+ case kTfLiteBuiltinDiv:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Div);
+ break;
+ case kTfLiteBuiltinFloorDiv:
+ {
+ auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+ multiLayer.AssignValues(layers.first, layers.second);
+ elementwiseBinaryLayer = &multiLayer;
+ }
+ break;
+ case kTfLiteBuiltinMaximum:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Maximum);
+ break;
+ case kTfLiteBuiltinMinimum:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Minimum);
+ break;
+ case kTfLiteBuiltinMul:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Mul);
+ break;
+ case kTfLiteBuiltinSub:
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+ armnn::BinaryOperation::Sub);
+ break;
+ default:
+ return kTfLiteError;
+ }
+ ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
+ armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
+ delegateData,
+ tfLiteContext,
+ tfLiteNode);
+ if (inputsTensorsProcess == kTfLiteError)
+ {
+ return inputsTensorsProcess;
+ }
+
+ if(Connect(elementwiseBinaryLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ // Check and Create Activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/MultiLayerFacade.hpp b/delegate/opaque/src/MultiLayerFacade.hpp
deleted file mode 100644
index e16969768e..0000000000
--- a/delegate/opaque/src/MultiLayerFacade.hpp
+++ /dev/null
@@ -1,4 +0,0 @@
-//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index ead577f806..5cef9c42ff 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -629,6 +629,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
nodeIndex,
kTfLiteBuiltinAbs,
armnn::UnaryOperation::Abs);
+ case kTfLiteBuiltinAdd:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinAdd);
case kTfLiteBuiltinArgMax:
return VisitArgMinMaxOperator(delegateData,
tfLiteContext,
@@ -720,6 +726,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinDepthwiseConv2d);
+ case kTfLiteBuiltinDiv:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDiv);
case kTfLiteBuiltinElu:
return VisitActivationOperator(delegateData,
tfLiteContext,
@@ -746,6 +758,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinFloor);
+ case kTfLiteBuiltinFloorDiv:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFloorDiv);
case kTfLiteBuiltinFullyConnected:
return VisitFullyConnectedOperator(delegateData,
tfLiteContext,
@@ -862,12 +880,30 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinMaxPool2d);
+ case kTfLiteBuiltinMaximum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMaximum);
case kTfLiteBuiltinMean:
return VisitControlOperator(delegateData,
tfLiteContext,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinMean);
+ case kTfLiteBuiltinMinimum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMinimum);
+ case kTfLiteBuiltinMul:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMul);
case kTfLiteBuiltinNeg:
return VisitElementwiseUnaryOperator(delegateData,
tfLiteContext,
@@ -932,6 +968,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinSpaceToBatchNd);
+ case kTfLiteBuiltinSub:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSub);
case kTfLiteBuiltinSqrt:
return VisitElementwiseUnaryOperator(delegateData,
tfLiteContext,