author    Matthew Sloyan <matthew.sloyan@arm.com>  2023-04-24 12:53:04 +0100
committer Matthew Sloyan <matthew.sloyan@arm.com>  2023-04-24 18:03:44 +0100
commit    080ffd84eccb4b849c192155c0ba39431d53c894 (patch)
tree      b6ef09a3c5139ff900f627fc45d665c7e78401b9
parent    1ffbaac882792f166357ecf388667e8405c69966 (diff)
download  armnn-080ffd84eccb4b849c192155c0ba39431d53c894.tar.gz
IVGCVSW-7584 Implement Conv2d and DepthwiseConv2d operators for Opaque Delegate
* Added VisitConv2dOperator and VisitDepthwiseConv2dOperator functions to Convolution.hpp.
* Enabled Convolution2d and DepthwiseConv2d tests.
* Moved TransposeConvolution2d tests to a separate file.
* Added Opaque Delegate shared functions.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ica10c9469fc830f512edad1ad79884f90ae511d0
-rw-r--r--  delegate/CMakeLists.txt                      |  10
-rw-r--r--  delegate/opaque/CMakeLists.txt               |   5
-rw-r--r--  delegate/opaque/src/Convolution.hpp          | 499
-rw-r--r--  delegate/opaque/src/SharedFunctions.cpp      | 104
-rw-r--r--  delegate/opaque/src/SharedFunctions.hpp      |  27
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp       |  12
-rw-r--r--  delegate/test/Convolution2dTest.cpp          | 326
-rw-r--r--  delegate/test/TransposeConvolution2dTest.cpp | 140
8 files changed, 897 insertions, 226 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index f8b6ea52d1..b81feb6b1e 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -198,6 +198,7 @@ if(BUILD_UNIT_TESTS)
test/SplitTestHelper.hpp
test/TestUtils.hpp
test/TestUtils.cpp
+ test/TransposeConvolution2dTest.cpp
test/TransposeTest.cpp
test/TransposeTestHelper.hpp
test/UnidirectionalSequenceLstmTest.cpp
@@ -258,10 +259,13 @@ if(BUILD_UNIT_TESTS)
common/src/test/DelegateTestInterpreterUtils.hpp
opaque/src/test/ArmnnOpaqueDelegateTest.cpp
opaque/src/test/DelegateTestInterpreter.cpp
- test/TestUtils.hpp
- test/TestUtils.cpp
test/CastTest.cpp
- test/CastTestHelper.hpp)
+ test/CastTestHelper.hpp
+ test/Convolution2dTest.cpp
+ test/ConvolutionTestHelper.hpp
+ test/DepthwiseConvolution2dTest.cpp
+ test/TestUtils.hpp
+ test/TestUtils.cpp)
# Until all operators are supported, we have to add tests one by one above to opaqueDelegate_unittest_sources.
# After that we can add commonDelegate_unittest_sources to the add_executable below.
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 1999299390..39df124310 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -8,7 +8,10 @@ list(APPEND armnnOpaqueDelegateObject_sources
include/armnn_delegate.hpp
include/Version.hpp
src/armnn_delegate.cpp
- src/Redefine.hpp)
+ src/Convolution.hpp
+ src/Redefine.hpp
+ src/SharedFunctions.cpp
+ src/SharedFunctions.hpp)
add_library(armnnOpaqueDelegateObject OBJECT ${armnnOpaqueDelegateObject_sources})
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index e16969768e..163290b542 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -2,3 +2,502 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#include <OpaqueDelegateUtils.hpp>
+#include <SharedFunctions.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ if (numInputs < 2)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Expected a minimum of %d inputs but got %d in node #%d",
+            2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Use input indices to get filter tensor.
+ const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if(!IsValid(tfLiteFilterTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteFilterTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ armnn::TensorInfo biasTensorInfo;
+ const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;
+
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+ if(biasEnabled)
+ {
+ // Use input indices to get bias tensor.
+ tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+ if(!IsValid(tfLiteBiasTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid bias tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteBiasTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic bias tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
+ armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
+
+ armnn::Convolution2dDescriptor descriptor;
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);
+
+ // TfLite uses NHWC tensors
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONV2D",
+ tfLiteContext,
+ IsConvolution2dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ optionalBiasInfo);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Set up filter and biases
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
+
+ if(filterTensorInfo.IsConstant())
+ {
+ auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if (biasEnabled)
+ {
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ ARMNN_ASSERT(biasLayer != nullptr);
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+
+ // Check and Create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+
+TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ if (numInputs < 2)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Expected a minimum of %d inputs but got %d in node #%d",
+            2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input indices and use to get input tensor.
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Use input indices to get filter tensor.
+ const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if(!IsValid(tfLiteFilterTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteFilterTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Gather output indices and use to get output tensors.
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ auto* tfLiteNodeParameters =
+ reinterpret_cast<TfLiteDepthwiseConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+ TfLiteFusedActivation activationType = kTfLiteActNone;
+ if (tfLiteNodeParameters)
+ {
+ activationType = tfLiteNodeParameters->activation;
+ TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+ tfLiteContext,
+ outputTensorInfo,
+ outputTensorInfo,
+ activationType);
+ if(activationStatus != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+ }
+
+ armnn::TensorInfo biasTensorInfo;
+ const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;
+
+ bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+ if(biasEnabled)
+ {
+ // Use input indices to get bias tensor.
+ tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+ if(!IsValid(tfLiteBiasTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Invalid bias tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ if (IsDynamicTensor(tfLiteBiasTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic bias tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
+ }
+ else
+ {
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
+ armnn::DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+ descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);
+
+ // Assuming input is NHWC
+ unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ // TensorflowLite weights come in the format [1, H, W, I * M]
+ unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
+
+ armnn::BackendId setBackend;
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEPTHWISE_CONV2D",
+ tfLiteContext,
+ IsDepthwiseConvolutionSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
+
+ if(filterTensorInfo.IsConstant())
+ {
+ // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
+ auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if (biasEnabled)
+ {
+ if(biasTensorInfo.IsConstant())
+ {
+ auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
+
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ ARMNN_ASSERT(biasLayer != nullptr);
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ if(Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if (!tfLiteNodeParameters)
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ // Check and create activation
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+
+TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ switch(operatorCode)
+ {
+ case kTfLiteBuiltinConv2d:
+ return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ case kTfLiteBuiltinDepthwiseConv2d:
+ return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+ default:
+ return kTfLiteError;
+ }
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
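
Both visit functions above compute their padding through a CalcPadding helper defined elsewhere in the delegate utilities. As a point of reference, a minimal sketch of the SAME/VALID rule the call sites assume (this mirrors TfLite's SAME-padding convention; the exact helper lives outside this diff):

    // Sketch, assuming the signature used at the CalcPadding call sites above.
    // SAME: output = ceil(input / stride); any shortfall is split between
    // front and back padding, with the odd pixel going to the back.
    // VALID: no padding.
    void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride,
                     uint32_t dilation, uint32_t& paddingFront,
                     uint32_t& paddingBack, TfLitePadding padding)
    {
        paddingFront = 0;
        paddingBack  = 0;
        if (padding == kTfLitePaddingSame)
        {
            uint32_t outputSize  = (inputSize + stride - 1) / stride;
            uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
            uint32_t needed      = (outputSize - 1) * stride + dilatedSize;
            if (needed > inputSize)
            {
                paddingFront = (needed - inputSize) / 2;
                paddingBack  = (needed - inputSize) - paddingFront;
            }
        }
    }

For example, inputHeight = 5, filterHeight = 3, stride = 2, dilation = 1 gives outputSize = 3 and needed = 7, so m_PadTop = 1 and m_PadBottom = 1.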
diff --git a/delegate/opaque/src/SharedFunctions.cpp b/delegate/opaque/src/SharedFunctions.cpp
new file mode 100644
index 0000000000..93eb143bd0
--- /dev/null
+++ b/delegate/opaque/src/SharedFunctions.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SharedFunctions.hpp"
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo)
+{
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("FLOOR",
+ tfLiteContext,
+ IsFloorSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputTensorInfo,
+ outInfo);
+ };
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType)
+{
+ armnn::ActivationDescriptor activationDesc;
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+ case kTfLiteActReluN1To1:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+
+ auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ activationDesc);
+ };
+ validateFunc(outputInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+} // namespace armnnOpaqueDelegate
+
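
The bounded activation cases above map onto Arm NN's BoundedReLu, which computes min(m_A, max(m_B, x)). A one-line reference sketch (not delegate code) of what each bounded case clamps to:

    #include <algorithm>

    // BoundedReLu(x) = min(a, max(b, x)):
    //   kTfLiteActRelu6     -> a = 6, b =  0 : clamp to [0, 6]
    //   kTfLiteActReluN1To1 -> a = 1, b = -1 : clamp to [-1, 1]
    float BoundedReLu(float x, float a, float b)
    {
        return std::min(a, std::max(b, x));
    }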
diff --git a/delegate/opaque/src/SharedFunctions.hpp b/delegate/opaque/src/SharedFunctions.hpp
new file mode 100644
index 0000000000..72fbe6a332
--- /dev/null
+++ b/delegate/opaque/src/SharedFunctions.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <tensorflow/lite/c/builtin_op_data.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo);
+
+TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ TfLiteFusedActivation activationType);
+
+} // namespace armnnOpaqueDelegate
+
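
ValidateFloorOperator is declared here so other visitors can reuse it. A hypothetical caller (names assumed, not code from this change) would reject a FLOOR node before any layer is created:

    // Hypothetical fragment of a FLOOR visitor: bail out early if no
    // configured backend supports the operation for these tensor infos.
    if (ValidateFloorOperator(delegateData, tfLiteContext,
                              inputTensorInfo, outputTensorInfo) != kTfLiteOk)
    {
        return kTfLiteError;
    }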
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 04a4eae12e..2ef1e0069a 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -628,6 +628,18 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinCast);
+ case kTfLiteBuiltinConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConv2d);
+ case kTfLiteBuiltinDepthwiseConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthwiseConv2d);
default:
return kTfLiteError;
}
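
Both builtins funnel into the shared VisitConvolutionOperator entry point, which dispatches on the operator code, so the VisitNode switch stays small. If TRANSPOSE_CONV support is later ported to the opaque delegate, it would plausibly hook in the same way; a sketch of a possible future case (not part of this change, and the switch in Convolution.hpp would need a matching case):

    case kTfLiteBuiltinTransposeConv:
        return VisitConvolutionOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinTransposeConv);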
diff --git a/delegate/test/Convolution2dTest.cpp b/delegate/test/Convolution2dTest.cpp
index 3459e6883d..4b54a14058 100644
--- a/delegate/test/Convolution2dTest.cpp
+++ b/delegate/test/Convolution2dTest.cpp
@@ -55,22 +55,22 @@ void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
tflite::Padding padding = tflite::Padding_SAME;
ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_FLOAT32,
- 2, // strideX
- 2, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
+ ::tflite::TensorType_FLOAT32,
+ 2, // strideX
+ 2, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_NONE,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues);
}
void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
@@ -98,22 +98,22 @@ void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
tflite::Padding padding = tflite::Padding_SAME;
ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
+ ::tflite::TensorType_INT8,
+ 1, // strideX
+ 1, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_NONE,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues);
}
void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
@@ -150,28 +150,28 @@ void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
tflite::Padding padding = tflite::Padding_SAME;
ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_UINT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_RELU,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- {1.0f}, // biasScale
- {0}, // biasOffset
- {1.0f}, // filterScale
- {4}, // filterOffsets
- 2, // output scale
- 20); // output offset
+ ::tflite::TensorType_UINT8,
+ 1, // strideX
+ 1, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_RELU,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues,
+ {1.0f}, // biasScale
+ {0}, // biasOffset
+ {1.0f}, // filterScale
+ {4}, // filterOffsets
+ 2, // output scale
+ 20); // output offset
}
void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
@@ -204,22 +204,22 @@ void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
tflite::Padding padding = tflite::Padding_SAME;
ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_UINT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_RELU6,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
+ ::tflite::TensorType_UINT8,
+ 1, // strideX
+ 1, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_RELU6,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues);
}
@@ -271,32 +271,32 @@ void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
tflite::Padding padding = tflite::Padding_SAME;
ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- biasScales,
- {0,0,0,0},
- filterScales,
- {0,0,0,0},
- outputQuantScale,
- outputQuantOffset,
- inputQuantScale,
- inputQuantOffset,
- 1, // depth_multiplier is ignored for conv2d value doesn't matter
- filterQuantizationDim);
+ ::tflite::TensorType_INT8,
+ 1, // strideX
+ 1, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_NONE,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues,
+ biasScales,
+ {0,0,0,0},
+ filterScales,
+ {0,0,0,0},
+ outputQuantScale,
+ outputQuantOffset,
+ inputQuantScale,
+ inputQuantOffset,
+                                     1, // depth_multiplier is ignored for conv2d, so its value doesn't matter
+ filterQuantizationDim);
}
TEST_SUITE("Convolution2dTest_CpuRefTests")
@@ -327,14 +327,14 @@ TEST_SUITE("Convolution2dTest_CpuAccTests")
TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-Conv2DWithBiasesFp32Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ Conv2DWithBiasesFp32Test(backends);
}
TEST_CASE ("Conv2DWithBiases_Int8_CpuAcc_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-Conv2DWithBiasesInt8Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ Conv2DWithBiasesInt8Test(backends);
}
TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
@@ -350,14 +350,14 @@ TEST_SUITE("Convolution2dTest_GpuAccTests")
TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-Conv2DWithBiasesFp32Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ Conv2DWithBiasesFp32Test(backends);
}
TEST_CASE ("Conv2DWithBiases_Int8_GpuAcc_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-Conv2DWithBiasesInt8Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ Conv2DWithBiasesInt8Test(backends);
}
TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
@@ -368,122 +368,4 @@ TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
-void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> transposeTensorShape { 4 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
- static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
- std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
- std::vector<int8_t> expectedOutputValues =
- {
- 0, 1, 2,
- 2, 11, 12,
- 6, 20, 16
- };
-
- tflite::Padding padding = tflite::Padding_VALID;
- TransposeConvTest<int8_t>(backends,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- padding,
- transposeTensorShape,
- filterShape,
- inputShape,
- outputShape,
- transposeData,
- filterValues,
- inputValues,
- expectedOutputValues);
-}
-
-void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> transposeTensorShape { 4 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
- static std::vector<float> inputValues = { 1, 2, 3, 4 };
- std::vector<float> filterValues = { 0, 1, 2, 4 };
- std::vector<float> expectedOutputValues =
- {
- 0, 1, 2,
- 2, 11, 12,
- 6, 20, 16
- };
-
- tflite::Padding padding = tflite::Padding_VALID;
- TransposeConvTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
- 1, // strideX
- 1, // strideY
- padding,
- transposeTensorShape,
- filterShape,
- inputShape,
- outputShape,
- transposeData,
- filterValues,
- inputValues,
- expectedOutputValues);
-}
-
-TEST_SUITE("TransposeConv_CpuRef_Test")
-{
-
-TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_CpuRef_Test)
-
-TEST_SUITE("TransposeConv_CpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)
-
-TEST_SUITE("TransposeConv_GpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)
-
} // namespace armnnDelegate
\ No newline at end of file
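
The uint8 tests above pass explicit scale/offset pairs, which follow TfLite's affine quantization scheme: real = scale * (q - zeroPoint). A reference sketch of the mapping (standard TfLite quantization, not a helper from this test file):

    #include <cstdint>

    // Affine dequantization behind the biasScale/filterScale and output
    // scale/offset parameters used in the tests above.
    float Dequantize(uint8_t q, float scale, int32_t zeroPoint)
    {
        return scale * (static_cast<int32_t>(q) - zeroPoint);
    }

With the output scale 2 and offset 20 from Conv2DWithBiasesReluUint8Test, a stored value of 30 therefore represents 2 * (30 - 20) = 20.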
diff --git a/delegate/test/TransposeConvolution2dTest.cpp b/delegate/test/TransposeConvolution2dTest.cpp
new file mode 100644
index 0000000000..4630a9004f
--- /dev/null
+++ b/delegate/test/TransposeConvolution2dTest.cpp
@@ -0,0 +1,140 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvolutionTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> transposeTensorShape { 4 };
+ std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+ std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+ std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+ std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
+ static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
+ std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
+ std::vector<int8_t> expectedOutputValues =
+ {
+ 0, 1, 2,
+ 2, 11, 12,
+ 6, 20, 16
+ };
+
+ tflite::Padding padding = tflite::Padding_VALID;
+ TransposeConvTest<int8_t>(backends,
+ ::tflite::TensorType_INT8,
+ 1, // strideX
+ 1, // strideY
+ padding,
+ transposeTensorShape,
+ filterShape,
+ inputShape,
+ outputShape,
+ transposeData,
+ filterValues,
+ inputValues,
+ expectedOutputValues);
+}
+
+void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> transposeTensorShape { 4 };
+ std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+ std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+ std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+ std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
+ static std::vector<float> inputValues = { 1, 2, 3, 4 };
+ std::vector<float> filterValues = { 0, 1, 2, 4 };
+ std::vector<float> expectedOutputValues =
+ {
+ 0, 1, 2,
+ 2, 11, 12,
+ 6, 20, 16
+ };
+
+ tflite::Padding padding = tflite::Padding_VALID;
+ TransposeConvTest<float>(backends,
+ ::tflite::TensorType_FLOAT32,
+ 1, // strideX
+ 1, // strideY
+ padding,
+ transposeTensorShape,
+ filterShape,
+ inputShape,
+ outputShape,
+ transposeData,
+ filterValues,
+ inputValues,
+ expectedOutputValues);
+}
+
+TEST_SUITE("TransposeConv_CpuRef_Test")
+{
+
+TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_CpuRef_Test)
+
+TEST_SUITE("TransposeConv_CpuAcc_Test")
+{
+
+TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)
+
+TEST_SUITE("TransposeConv_GpuAcc_Test")
+{
+
+TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)
+
+} // namespace armnnDelegate
\ No newline at end of file
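
The expected outputs in both transpose-convolution tests can be verified by hand: with stride 1 and VALID padding, each input element scatter-adds a scaled copy of the 2x2 filter into the 3x3 output. A standalone sketch (reference arithmetic only, not the delegate implementation) that reproduces { 0, 1, 2, 2, 11, 12, 6, 20, 16 } from the test data above:

    #include <cstdio>

    int main()
    {
        const int input[2][2]  = { { 1, 2 }, { 3, 4 } };  // inputValues (1x2x2x1)
        const int filter[2][2] = { { 0, 1 }, { 2, 4 } };  // filterValues (1x2x2x1)
        int output[3][3] = {};                            // outputShape 1x3x3x1

        // Transposed convolution, stride 1, VALID padding:
        // scatter-add input[y][x] * filter into output at offset (y, x).
        for (int y = 0; y < 2; ++y)
            for (int x = 0; x < 2; ++x)
                for (int i = 0; i < 2; ++i)
                    for (int j = 0; j < 2; ++j)
                        output[y + i][x + j] += input[y][x] * filter[i][j];

        for (int r = 0; r < 3; ++r)
        {
            std::printf("%d %d %d\n", output[r][0], output[r][1], output[r][2]);
        }
        // Prints: 0 1 2 / 2 11 12 / 6 20 16, matching expectedOutputValues.
        return 0;
    }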