path: root/delegate/src
author     Teresa Charlin <teresa.charlinreyes@arm.com>   2023-03-14 12:10:28 +0000
committer  Teresa Charlin <teresa.charlinreyes@arm.com>   2023-03-28 11:41:55 +0100
commit     ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
tree       a5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/src
parent     9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
download   armnn-ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9.tar.gz
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * tests is for test utilities shared between the existing delegate and the opaque delegate, which work out which delegate to use
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate is built as libarmnnOpaqueDelegate.so
* An Opaque structure is introduced but no API is added yet.
* CMakeLists.txt and delegate/CMakeLists.txt have been modified and 2 new CMakeLists.txt files added
* Rename BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE
* Rename BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
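For context, the sketch below shows how an application typically loads the classic delegate (built as libarmnnDelegate.so) into a TfLite interpreter. It is a minimal illustration under stated assumptions: the model path and the CpuAcc backend choice are placeholders, and the opaque delegate is not shown because, as noted above, its API is not added yet by this change.

    // Minimal sketch: registering the classic Arm NN delegate with a TfLite
    // interpreter. The model path and backend choice are illustrative assumptions.
    #include <armnn_delegate.hpp>
    #include <DelegateOptions.hpp>

    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/model.h>

    #include <memory>

    int main()
    {
        // Load a TfLite model (path is a placeholder).
        auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
        tflite::ops::builtin::BuiltinOpResolver resolver;
        std::unique_ptr<tflite::Interpreter> interpreter;
        tflite::InterpreterBuilder(*model, resolver)(&interpreter);

        // Create the Arm NN delegate, here assuming the CpuAcc backend.
        armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuAcc);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);

        // Hand supported subgraphs over to Arm NN; unsupported ops stay on the TfLite runtime.
        interpreter->ModifyGraphWithDelegate(armnnDelegatePtr.get());

        interpreter->AllocateTensors();
        interpreter->Invoke();
        return 0;
    }

After this restructure, building with BUILD_CLASSIC_DELEGATE produces libarmnnDelegate.so as before, while BUILD_OPAQUE_DELEGATE produces libarmnnOpaqueDelegate.so.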
Diffstat (limited to 'delegate/src')
-rw-r--r--  delegate/src/Activation.hpp  133
-rw-r--r--  delegate/src/ArgMinMax.hpp  132
-rw-r--r--  delegate/src/BatchMatMul.hpp  107
-rw-r--r--  delegate/src/BatchSpace.hpp  216
-rw-r--r--  delegate/src/Comparison.hpp  135
-rw-r--r--  delegate/src/Control.hpp  342
-rw-r--r--  delegate/src/Convolution.hpp  870
-rw-r--r--  delegate/src/DelegateOptions.cpp  256
-rw-r--r--  delegate/src/DelegateUtils.hpp  639
-rw-r--r--  delegate/src/ElementwiseBinary.hpp  401
-rw-r--r--  delegate/src/ElementwiseUnary.hpp  91
-rw-r--r--  delegate/src/Fill.hpp  114
-rw-r--r--  delegate/src/FullyConnected.hpp  275
-rw-r--r--  delegate/src/Gather.hpp  106
-rw-r--r--  delegate/src/GatherNd.hpp  82
-rw-r--r--  delegate/src/LogicalBinary.hpp  102
-rw-r--r--  delegate/src/Lstm.hpp  268
-rw-r--r--  delegate/src/MultiLayerFacade.hpp  136
-rw-r--r--  delegate/src/Normalization.hpp  162
-rw-r--r--  delegate/src/Pack.hpp  122
-rw-r--r--  delegate/src/Pad.hpp  179
-rw-r--r--  delegate/src/Pooling.hpp  327
-rw-r--r--  delegate/src/Prelu.hpp  108
-rw-r--r--  delegate/src/Quantization.hpp  171
-rw-r--r--  delegate/src/Redefine.hpp  289
-rw-r--r--  delegate/src/Reduce.hpp  146
-rw-r--r--  delegate/src/Resize.hpp  205
-rw-r--r--  delegate/src/Round.hpp  71
-rw-r--r--  delegate/src/Shape.hpp  95
-rw-r--r--  delegate/src/SharedFunctions.cpp  116
-rw-r--r--  delegate/src/SharedFunctions.hpp  25
-rw-r--r--  delegate/src/Slice.hpp  141
-rw-r--r--  delegate/src/Softmax.hpp  155
-rw-r--r--  delegate/src/SpaceDepth.hpp  152
-rw-r--r--  delegate/src/Split.hpp  347
-rw-r--r--  delegate/src/StridedSlice.hpp  156
-rw-r--r--  delegate/src/Transpose.hpp  110
-rw-r--r--  delegate/src/UnidirectionalSequenceLstm.hpp  302
-rw-r--r--  delegate/src/Unpack.hpp  214
-rw-r--r--  delegate/src/armnn_delegate.cpp  1059
-rw-r--r--  delegate/src/armnn_external_delegate.cpp  68
-rw-r--r--  delegate/src/test/ActivationTest.cpp  299
-rw-r--r--  delegate/src/test/ActivationTestHelper.hpp  130
-rw-r--r--  delegate/src/test/ArgMinMaxTest.cpp  174
-rw-r--r--  delegate/src/test/ArgMinMaxTestHelper.hpp  199
-rw-r--r--  delegate/src/test/ArmnnDelegateTest.cpp  93
-rw-r--r--  delegate/src/test/BatchMatMulTest.cpp  689
-rw-r--r--  delegate/src/test/BatchMatMulTestHelper.hpp  208
-rw-r--r--  delegate/src/test/BatchSpaceTest.cpp  299
-rw-r--r--  delegate/src/test/BatchSpaceTestHelper.hpp  218
-rw-r--r--  delegate/src/test/CastTest.cpp  95
-rw-r--r--  delegate/src/test/CastTestHelper.hpp  159
-rw-r--r--  delegate/src/test/ComparisonTest.cpp  844
-rw-r--r--  delegate/src/test/ComparisonTestHelper.hpp  238
-rw-r--r--  delegate/src/test/ControlTest.cpp  420
-rw-r--r--  delegate/src/test/ControlTestHelper.hpp  346
-rw-r--r--  delegate/src/test/Convolution2dTest.cpp  489
-rw-r--r--  delegate/src/test/Convolution3dTest.cpp  318
-rw-r--r--  delegate/src/test/ConvolutionTestHelper.hpp  784
-rw-r--r--  delegate/src/test/DelegateOptionsTest.cpp  372
-rw-r--r--  delegate/src/test/DelegateOptionsTestHelper.hpp  344
-rw-r--r--  delegate/src/test/DepthwiseConvolution2dTest.cpp  282
-rw-r--r--  delegate/src/test/ElementwiseBinaryTest.cpp  1136
-rw-r--r--  delegate/src/test/ElementwiseBinaryTestHelper.hpp  243
-rw-r--r--  delegate/src/test/ElementwiseUnaryTest.cpp  420
-rw-r--r--  delegate/src/test/ElementwiseUnaryTestHelper.hpp  189
-rw-r--r--  delegate/src/test/FillTest.cpp  221
-rw-r--r--  delegate/src/test/FillTestHelper.hpp  159
-rw-r--r--  delegate/src/test/FullyConnectedTest.cpp  178
-rw-r--r--  delegate/src/test/FullyConnectedTestHelper.hpp  255
-rw-r--r--  delegate/src/test/GatherNdTest.cpp  113
-rw-r--r--  delegate/src/test/GatherNdTestHelper.hpp  181
-rw-r--r--  delegate/src/test/GatherTest.cpp  117
-rw-r--r--  delegate/src/test/GatherTestHelper.hpp  184
-rw-r--r--  delegate/src/test/LogicalTest.cpp  226
-rw-r--r--  delegate/src/test/LogicalTestHelper.hpp  201
-rw-r--r--  delegate/src/test/LstmTest.cpp  189
-rw-r--r--  delegate/src/test/LstmTestHelper.hpp  691
-rw-r--r--  delegate/src/test/MirrorPadTest.cpp  341
-rw-r--r--  delegate/src/test/NeonDelegateTests_NDK_Issue.cpp  63
-rw-r--r--  delegate/src/test/NormalizationTest.cpp  72
-rw-r--r--  delegate/src/test/NormalizationTestHelper.hpp  263
-rw-r--r--  delegate/src/test/PackTest.cpp  516
-rw-r--r--  delegate/src/test/PackTestHelper.hpp  186
-rw-r--r--  delegate/src/test/PadTest.cpp  606
-rw-r--r--  delegate/src/test/PadTestHelper.hpp  224
-rw-r--r--  delegate/src/test/Pooling2dTest.cpp  1275
-rw-r--r--  delegate/src/test/Pooling2dTestHelper.hpp  196
-rw-r--r--  delegate/src/test/Pooling3dTest.cpp  431
-rw-r--r--  delegate/src/test/Pooling3dTestHelper.hpp  298
-rw-r--r--  delegate/src/test/PreluTest.cpp  134
-rw-r--r--  delegate/src/test/PreluTestHelper.hpp  195
-rw-r--r--  delegate/src/test/QuantizationTest.cpp  455
-rw-r--r--  delegate/src/test/QuantizationTestHelper.hpp  200
-rw-r--r--  delegate/src/test/RedefineTestHelper.hpp  202
-rw-r--r--  delegate/src/test/ReduceTest.cpp  423
-rw-r--r--  delegate/src/test/ReduceTestHelper.hpp  228
-rw-r--r--  delegate/src/test/ReshapeTest.cpp  517
-rw-r--r--  delegate/src/test/ResizeTest.cpp  134
-rw-r--r--  delegate/src/test/ResizeTestHelper.hpp  194
-rw-r--r--  delegate/src/test/RoundTest.cpp  72
-rw-r--r--  delegate/src/test/RoundTestHelper.hpp  163
-rw-r--r--  delegate/src/test/ShapeTest.cpp  45
-rw-r--r--  delegate/src/test/ShapeTestHelper.hpp  173
-rw-r--r--  delegate/src/test/SliceTest.cpp  81
-rw-r--r--  delegate/src/test/SliceTestHelper.hpp  183
-rw-r--r--  delegate/src/test/SoftmaxTest.cpp  77
-rw-r--r--  delegate/src/test/SoftmaxTestHelper.hpp  194
-rw-r--r--  delegate/src/test/SpaceDepthTest.cpp  207
-rw-r--r--  delegate/src/test/SpaceDepthTestHelper.hpp  168
-rw-r--r--  delegate/src/test/SplitTest.cpp  262
-rw-r--r--  delegate/src/test/SplitTestHelper.hpp  370
-rw-r--r--  delegate/src/test/StridedSliceTest.cpp  241
-rw-r--r--  delegate/src/test/StridedSliceTestHelper.hpp  221
-rw-r--r--  delegate/src/test/TestUtils.cpp  152
-rw-r--r--  delegate/src/test/TestUtils.hpp  101
-rw-r--r--  delegate/src/test/TransposeTest.cpp  46
-rw-r--r--  delegate/src/test/TransposeTestHelper.hpp  177
-rw-r--r--  delegate/src/test/UnidirectionalSequenceLstmTest.cpp  1464
-rw-r--r--  delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp  742
-rw-r--r--  delegate/src/test/UnpackTest.cpp  179
-rw-r--r--  delegate/src/test/UnpackTestHelper.hpp  188
122 files changed, 0 insertions, 33487 deletions
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
deleted file mode 100644
index 59066d23e3..0000000000
--- a/delegate/src/Activation.hpp
+++ /dev/null
@@ -1,133 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputInfo,
- armnn::ActivationDescriptor& activationDesc)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
- tfLiteContext,
- IsActivationSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- outputInfo,
- activationDesc);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::ActivationDescriptor activationDesc;
- switch(operatorCode)
- {
- case kTfLiteBuiltinRelu:
- {
- activationDesc.m_Function = armnn::ActivationFunction::ReLu;
- break;
- }
- case kTfLiteBuiltinRelu6:
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 6.0f;
- break;
- }
- case kTfLiteBuiltinLogistic:
- {
- activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
- break;
- }
- case kTfLiteBuiltinTanh:
- {
- activationDesc.m_Function = armnn::ActivationFunction::TanH;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = 1.0f;
- break;
- }
- case kTfLiteBuiltinElu:
- {
- activationDesc.m_Function = armnn::ActivationFunction::Elu;
- activationDesc.m_A = 1.0f;
- break;
- }
- case kTfLiteBuiltinHardSwish:
- {
- activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
- break;
- }
- default:
- {
- return kTfLiteError;
- }
- }
- if (!delegateData.m_Network)
- {
- return ValidateActivationOperator(delegateData,
- tfLiteContext,
- inputTensorInfo,
- outputTensorInfo,
- activationDesc);
- }
- armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
- ARMNN_ASSERT(activationLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(activationLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
deleted file mode 100644
index 4e4a2a3f3a..0000000000
--- a/delegate/src/ArgMinMax.hpp
+++ /dev/null
@@ -1,132 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t argMinMaxOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, argMinMaxOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, argMinMaxOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Get const axis value from model and set it to descriptor.
- const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- armnn::ArgMinMaxDescriptor desc;
- // Get the axis value from the input tensor
- switch (tfLiteAxisTensor.type)
- {
- case kTfLiteInt32:
- case kTfLiteInt64:
- desc.m_Axis = tflite::GetTensorData<int>(&tfLiteAxisTensor)[0];
- break;
- default:
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Axis value data type is not supported in operator #%d node #%d: ",
- argMinMaxOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64.
- if (argMinMaxOperatorCode == kTfLiteBuiltinArgMax)
- {
- desc.m_Function = armnn::ArgMinMaxFunction::Max;
- auto* argMaxParameters = reinterpret_cast<TfLiteArgMaxParams*>(tfLiteNode->builtin_data);
- if (argMaxParameters->output_type != kTfLiteInt32 && argMaxParameters->output_type != kTfLiteInt64)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
- argMinMaxOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- }
- else
- {
- desc.m_Function = armnn::ArgMinMaxFunction::Min;
- auto* argMinParameters = reinterpret_cast<TfLiteArgMinParams*>(tfLiteNode->builtin_data);
- if (argMinParameters->output_type != kTfLiteInt32 && argMinParameters->output_type != kTfLiteInt64)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
- argMinMaxOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
- tfLiteContext,
- IsArgMinMaxSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- desc);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add an ArgMinMax layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
deleted file mode 100644
index 49fba05238..0000000000
--- a/delegate/src/BatchMatMul.hpp
+++ /dev/null
@@ -1,107 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
-namespace armnnDelegate
-{
- TfLiteStatus VisitBatchMatMulOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
- {
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& kTfLiteLHSInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- const TfLiteTensor& kTfLiteRHSInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
-
- if (!IsValid(tfLiteContext, kTfLiteLHSInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- if (!IsValid(tfLiteContext, kTfLiteRHSInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- if (IsDynamicTensor(kTfLiteLHSInputTensor) || IsDynamicTensor(kTfLiteRHSInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& kTfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(kTfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& armnnLHSInputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteLHSInputTensor);
- const armnn::TensorInfo& armnnRHSInputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteRHSInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(kTfLiteOutputTensor, true);
-
- armnn::BatchMatMulDescriptor descriptor;
- auto* params = reinterpret_cast<TfLiteBatchMatMulParams *>(tfLiteNode->builtin_data);
-
- // Tensorflow params are called adjoint, however they are actually just transposes behind the scene. They do
- // not perform ajoint.
- descriptor.m_TransposeX = params->adj_x;
- descriptor.m_TransposeY = params->adj_y;
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("BATCH_MATMUL",
- tfLiteContext,
- IsBatchMatMulSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- armnnLHSInputTensorInfo,
- armnnRHSInputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- return Connect(layer, tfLiteNode, delegateData);
- }
-} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
deleted file mode 100644
index 30c6dbfc15..0000000000
--- a/delegate/src/BatchSpace.hpp
+++ /dev/null
@@ -1,216 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteCropsTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
- const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteCropsTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
- ::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
-
- std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
- std::memcpy(cropsVector.data(), tfLiteCropsTensor.data.data, cropsTensorInfo.GetNumBytes());
-
- size_t step = 2;
- std::vector<std::pair<unsigned int, unsigned int>> crops;
- for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
- {
- crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
- }
-
- armnn::BatchToSpaceNdDescriptor descriptor;
- descriptor.m_BlockShape = blockShape;
- descriptor.m_Crops = crops;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
- tfLiteContext,
- IsBatchToSpaceNdSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitBatchToSpaceNdOperator will be called again to add the layer to the network as seen below
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a BatchToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLitePadListTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
- const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteTensor(tfLitePadListTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
- std::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
-
- std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
- std::memcpy(padListVector.data(), tfLitePadListTensor.data.data, padListTensorInfo.GetNumBytes());
-
- size_t step = 2;
- std::vector<std::pair<unsigned int, unsigned int>> padList;
- for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
- {
- padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
- }
-
- armnn::SpaceToBatchNdDescriptor descriptor;
- descriptor.m_BlockShape = blockShape;
- descriptor.m_PadList = padList;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
- tfLiteContext,
- IsSpaceToBatchNdSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitSpaceToBatchNdOperator will be called again to add the layer to the network as seen below
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a SpaceToBatch layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
deleted file mode 100644
index 688f90c597..0000000000
--- a/delegate/src/Comparison.hpp
+++ /dev/null
@@ -1,135 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteComparisonOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor0))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLiteComparisonOperatorCode, nodeIndex);
-
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (IsDynamicTensor(tfLiteInputTensor1))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLiteComparisonOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLiteComparisonOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Check if we need to expand the dims of any of the input tensor infos.
- // This is required for a few of the backends.
- if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
- {
- ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
- }
-
- armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
- switch(tfLiteComparisonOperatorCode)
- {
- case kTfLiteBuiltinEqual:
- comparisonOperation = armnn::ComparisonOperation::Equal;
- break;
- case kTfLiteBuiltinGreater:
- comparisonOperation = armnn::ComparisonOperation::Greater;
- break;
- case kTfLiteBuiltinGreaterEqual:
- comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
- break;
- case kTfLiteBuiltinLess:
- comparisonOperation = armnn::ComparisonOperation::Less;
- break;
- case kTfLiteBuiltinLessEqual:
- comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
- break;
- case kTfLiteBuiltinNotEqual:
- comparisonOperation = armnn::ComparisonOperation::NotEqual;
- break;
- default:
- return kTfLiteError;
- }
-
- armnn::ComparisonDescriptor descriptor(comparisonOperation);
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
- tfLiteContext,
- IsComparisonSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
- comparisonLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(comparisonLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- return Connect(comparisonLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
deleted file mode 100644
index a3ea6e92a7..0000000000
--- a/delegate/src/Control.hpp
+++ /dev/null
@@ -1,342 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
-namespace armnnDelegate
-{
-
-void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
- armnn::OriginsDescriptor& concatDescriptor,
- const unsigned int concatAxis,
- unsigned int inputIndex,
- unsigned int& mergeDimOrigin)
-{
- const uint32_t inputRank = concatDescriptor.GetNumDimensions();
-
- // double check dimensions of the tensors
- if (inputTensorInfo.GetNumDimensions() != inputRank)
- {
- throw armnn::ParseException("The number of dimensions for input tensors "
- "of the concatenation operator should be: " + std::to_string(inputRank));
- }
-
- for (unsigned int j = 0; j < concatAxis; ++j)
- {
- concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
- }
-
- concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
- mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];
-
- for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
- {
- concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
- }
-}
-
-TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteConcatOperatorCode)
-{
- unsigned int numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- std::vector<armnn::TensorInfo> inputTensorInfos;
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteConcatOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- inputTensorInfos.emplace_back(inputTensorInfo);
- }
-
- // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
- std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
- std::transform(inputTensorInfos.begin(),
- inputTensorInfos.end(),
- std::back_inserter(inputConstTensorInfos),
- [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- // Setup OriginsDescriptor, axis and view origin
- unsigned int numConcatView = static_cast<unsigned int>(numInputs);
- uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;
-
- auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);
-
- if(!concatenationParameters)
- {
- throw armnn::Exception(&"TfLiteArmnnDelegate: Concat parameters are null in: " [ nodeIndex]);
- }
-
- const unsigned int concatDimInput = static_cast<unsigned int>(
- (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
-
- armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
- concatDescriptor.SetConcatAxis(concatDimInput);
-
- unsigned int mergeDimOrigin = 0;
- for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
- {
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(
- tfLiteTensors[tfLiteNode->inputs->data[viewIndex]]);
-
- // Sets up concatDescriptor view origin
- SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
- }
-
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Verify we support the fused activation before attempting to create a layer
- TfLiteFusedActivation activationType = concatenationParameters->activation;
-
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
- tfLiteContext,
- IsConcatSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputConstTensorInfos,
- outputTensorInfo,
- concatDescriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Setup layer and connect.
- armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
- concatenationLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(concatenationLayer != nullptr);
-
- // Connect the Constant Inputs
- auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
- if(Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- if (activationType == kTfLiteActNone)
- {
- // No Activation
- return kTfLiteOk;
- }
-
- // Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
-}
-
-TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteMeanOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if(!IsValid(&tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
- tfLiteMeanOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLiteMeanOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if(!IsValid(&tfLiteAxisTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid axis tensor in operator #%d node #%d: ",
- tfLiteMeanOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteAxisTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic axis tensors are not supported in operator #%d node #%d: ",
- tfLiteMeanOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if(!IsValid(&tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
- tfLiteAxisTensor, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLiteMeanOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
-
- std::vector<int32_t> axis;
- // Add axis data to vector to be converter to unsigned int and assigned to descriptor axis.
- for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
- {
- axis.emplace_back(axisTensorData[i]);
- }
-
- // Convert the axis to unsigned int and remove duplicates.
- unsigned int rank = inputTensorInfo.GetNumDimensions();
- std::set<unsigned int> uniqueAxis;
- std::transform(axis.begin(),
- axis.end(),
- std::inserter(uniqueAxis, uniqueAxis.begin()),
- [rank](int i)->unsigned int{ return (i + rank) % rank; });
-
- // Setup MeanDescriptor and assign axis and keepDims
- armnn::MeanDescriptor desc;
- desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
- desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ? true : false;
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("MEAN",
- tfLiteContext,
- IsMeanSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- desc);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Setup layer and connect.
- armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
- meanLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(meanLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- return Connect(meanLayer, tfLiteNode, delegateData);
-}
-
-TfLiteStatus VisitControlOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
-
- switch(operatorCode)
- {
- case kTfLiteBuiltinConcatenation:
- return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
- case kTfLiteBuiltinMean:
- return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
- default:
- return kTfLiteError;
- }
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
deleted file mode 100644
index 31cb2ab9ac..0000000000
--- a/delegate/src/Convolution.hpp
+++ /dev/null
@@ -1,870 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include "SharedFunctions.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include "tensorflow/lite/kernels/internal/tensor.h"
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- armnn::Convolution2dDescriptor descriptor;
- const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
-
- bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
- descriptor.m_BiasEnabled = biasEnabled;
- descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
- descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
- descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if(!IsValid(&tfLiteTensors[tfLiteNode->inputs->data[0]]))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if(!IsValid(&tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if(!IsValid(&tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType=kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- }
-
- const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
-
- armnn::TensorInfo biasTensorInfo;
- if(biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(!IsValid(&tfLiteBiasTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteBiasTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
- biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
- }
- else
- {
- biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
- }
-
- armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
-
- // TfLite uses NHWC tensors
- const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
- const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
- const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
- const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
-
- // Calculate padding
- CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
- descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
- CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
- descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
- tfLiteContext,
- IsConvolution2dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor,
- filterTensorInfo,
- optionalBiasInfo);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Set up filter and biases
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
- layer->SetBackendId(setBackend);
-
- if(filterTensorInfo.IsConstant())
- {
- auto filter =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
- filterTensorInfo);
-
- armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
- weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
- }
-
- if (biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(biasTensorInfo.IsConstant())
- {
- auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
- ARMNN_ASSERT(biasLayer != nullptr);
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
- }
- }
-
- // The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
- {
- auto input =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
- inputTensorInfo);
-
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
- inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- }
-
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- if (!tfLiteNodeParameters)
- {
- // No Activation
- return kTfLiteOk;
- }
- // Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
-
-}
-
-// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
-#if defined(ARMNN_POST_TFLITE_2_5)
-TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- armnn::Convolution3dDescriptor descriptor;
- const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
-
- bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
- descriptor.m_BiasEnabled = biasEnabled;
- descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
- descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
- descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
- descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
- descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
- descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
- descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType=kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- }
-
- const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
-
- armnn::TensorInfo biasTensorInfo;
- if(biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
- }
- else
- {
- biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
- }
-
- armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
-
- // TfLite uses NDHWC tensors
- const unsigned int inputDepth = inputTensorInfo.GetShape()[1];
- const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
- const unsigned int inputWidth = inputTensorInfo.GetShape()[3];
-
- // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
- const unsigned int filterDepth = filterTensorInfo.GetShape()[0];
- const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
- const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
-
- // Calculate padding
- CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
- descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
- CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
- descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
- CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
- descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
- tfLiteContext,
- IsConvolution3dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor,
- filterTensorInfo,
- optionalBiasInfo);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- // Add a constant layer for weights and biases if inputs are constant,
- // which are connected to the Convolution3d layer as inputs.
- if (filterTensorInfo.IsConstant())
- {
- auto filter = CreateConstTensor(&tfLiteFilterTensor,
- filterTensorInfo);
-
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
- ARMNN_ASSERT(weightsLayer != nullptr);
-
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
- weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
- }
-
- if(biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(biasTensorInfo.IsConstant())
- {
- auto biases = CreateConstTensor(&tfLiteBiasTensor,
- biasTensorInfo);
-
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
- ARMNN_ASSERT(biasLayer != nullptr);
-
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
- }
- }
-
- // The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
- {
- auto input =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
- inputTensorInfo);
-
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
- inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- if (!tfLiteNodeParameters)
- {
- // No Activation
- return kTfLiteOk;
- }
-
- // Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
-}
-#endif
-
-TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
-        TF_LITE_MAYBE_KERNEL_LOG(
-            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met, got %d in node #%d",
-            2, numInputs, nodeIndex);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
-
- armnn::DepthwiseConvolution2dDescriptor descriptor;
- const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
-
- descriptor.m_BiasEnabled = biasEnabled;
- descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
- descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
- descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if(!IsValid(&tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if(!IsValid(&tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if(!IsValid(&tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType = kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- }
-
- const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
-
- // Assuming input is NHWC
- unsigned int inputHeight = inputTensorInfo.GetShape()[1];
- unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
- // TensorflowLite weights come in the format [1, H, W, I * M]
- unsigned int filterHeight = filterTensorInfo.GetShape()[1];
- unsigned int filterWidth = filterTensorInfo.GetShape()[2];
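-    // For illustration (values not taken from any particular model): a 3x3 depthwise filter over
-    // 8 input channels with a depth multiplier of 2 arrives from TfLite with shape [1, 3, 3, 16],
-    // so filterHeight = 3 and filterWidth = 3, while the last dimension carries the I * M channels.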
-
- // Calculate padding
- CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
- descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
- CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
- descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
-
- armnn::TensorInfo biasTensorInfo;
- if(biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(!IsValid(&tfLiteBiasTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteBiasTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
- biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
- }
- else
- {
- biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
- }
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
- tfLiteContext,
- IsDepthwiseConvolutionSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor,
- filterTensorInfo,
- armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
-    ARMNN_ASSERT(layer != nullptr);
-    layer->SetBackendId(setBackend);
-
- if(filterTensorInfo.IsConstant())
- {
- // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
- auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
-
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
- weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
- }
-
- if (biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(biasTensorInfo.IsConstant())
- {
- auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
- ARMNN_ASSERT(biasLayer != nullptr);
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
- }
- }
-
- // The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
- {
- auto input =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
- inputTensorInfo);
-
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
- inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- if (!tfLiteNodeParameters)
- {
- // No Activation
- return kTfLiteOk;
- }
- // Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
-}
-
-TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- armnn::TransposeConvolution2dDescriptor descriptor;
- auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
- descriptor.m_BiasEnabled = false;
- descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
- descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if(!IsValid(&tfLiteOutputShapeTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteOutputShapeTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
- std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
- if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
- {
- for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
- {
- outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
- }
- }
-
- if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
- {
- for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
- {
- outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
- }
- }
- // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
- for (int dimension : outputShape)
- {
- descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
- }
- descriptor.m_OutputShapeEnabled = true;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(!IsValid(&tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if(!IsValid(&tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if(!IsValid(&tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
- if (IsDynamicTensor(tfLiteFilterTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
- const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
-
- // TfLite uses NHWC tensors
- const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
- const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
- const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
- const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
-
- // Calculate padding
- CalcPadding(inputHeight,
- filterHeight,
- descriptor.m_StrideY,
- 1, // dilation y
- descriptor.m_PadTop,
- descriptor.m_PadBottom,
- parameters->padding);
- CalcPadding(inputWidth,
- filterWidth,
- descriptor.m_StrideX,
- 1, // dilation x
- descriptor.m_PadLeft,
- descriptor.m_PadRight,
- parameters->padding);
-
- // Set up filter
- auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
- filterTensorInfo);
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
- tfLiteContext,
- IsTransposeConvolution2dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor,
- filterTensorInfo,
- armnn::EmptyOptional());
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
-                                                                                             filterTensor,
-                                                                                             armnn::EmptyOptional());
-    ARMNN_ASSERT(layer != nullptr);
-    layer->SetBackendId(setBackend);
-
- // The data input can be constant, so we must check that this is allocated to an input slot
- if(inputTensorInfo.IsConstant())
- {
- auto input =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
- inputTensorInfo);
-
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
- inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // Connect
- if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
- {
- delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
- Connect(layer->GetInputSlot(0));
- }
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
- delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
- &outputSlot;
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- switch(operatorCode)
- {
- case kTfLiteBuiltinConv2d:
- return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
-// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
-#if defined(ARMNN_POST_TFLITE_2_5)
- case kTfLiteBuiltinConv3d:
- return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
-#endif
- case kTfLiteBuiltinDepthwiseConv2d:
- return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
- case kTfLiteBuiltinTransposeConv:
- return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
- default:
- return kTfLiteError;
- }
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/DelegateOptions.cpp b/delegate/src/DelegateOptions.cpp
deleted file mode 100644
index fc4858fa29..0000000000
--- a/delegate/src/DelegateOptions.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <DelegateOptions.hpp>
-#include <armnn/utility/NumericCast.hpp>
-#include <armnn/utility/StringUtils.hpp>
-
-namespace armnnDelegate
-{
-
-DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
- const std::vector<armnn::BackendOptions>& backendOptions,
- const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
- : m_Backends({computeDevice}), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
-{
- m_RuntimeOptions.m_BackendOptions = backendOptions;
-}
-
-DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
- const std::vector<armnn::BackendOptions>& backendOptions,
- const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
- : m_Backends(backends), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
-{
- m_RuntimeOptions.m_BackendOptions = backendOptions;
-}
-
-DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
- const armnn::OptimizerOptions& optimizerOptions,
- const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
- const armnn::Optional<armnn::DebugCallbackFunction>& func)
- : m_Backends({computeDevice}),
- m_RuntimeOptions(),
- m_OptimizerOptions(optimizerOptions),
- m_LoggingSeverity(logSeverityLevel),
- m_DebugCallbackFunc(func)
-{
-}
-
-DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
- const armnn::OptimizerOptions& optimizerOptions,
- const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
- const armnn::Optional<armnn::DebugCallbackFunction>& func)
- : m_Backends(backends),
- m_RuntimeOptions(),
- m_OptimizerOptions(optimizerOptions),
- m_LoggingSeverity(logSeverityLevel),
- m_DebugCallbackFunc(func)
-{
-}
-
-DelegateOptions::DelegateOptions(char const* const* options_keys,
- char const* const* options_values,
- size_t num_options,
- void (*report_error)(const char*))
-{
- armnn::IRuntime::CreationOptions runtimeOptions;
- armnn::OptimizerOptions optimizerOptions;
- bool internalProfilingState = false;
- armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
- for (size_t i = 0; i < num_options; ++i)
- {
- // Process backends
- if (std::string(options_keys[i]) == std::string("backends"))
- {
- // The backend option is a comma separated string of backendIDs that needs to be split
- std::vector<armnn::BackendId> backends;
-            char* dup = strdup(options_values[i]);
-            char* pch = std::strtok(dup, ",");
-            while (pch != NULL)
-            {
-                backends.push_back(pch);
-                pch = std::strtok(NULL, ",");
-            }
-            free(dup); // strdup allocates with malloc; release the copy once tokenising is finished
- this->SetBackends(backends);
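-            // For example (hypothetical option strings): options_values[i] == "GpuAcc,CpuAcc" yields
-            // backends == { "GpuAcc", "CpuAcc" }; when layer support is later queried, the backends
-            // are tried in this order and the first one that supports the layer is selected.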
- }
- // Process dynamic-backends-path
- else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
- {
- runtimeOptions.m_DynamicBackendsPath = std::string(options_values[i]);
- }
- // Process logging level
- else if (std::string(options_keys[i]) == std::string("logging-severity"))
- {
- this->SetLoggingSeverity(options_values[i]);
- }
- // Process GPU backend options
- else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
- {
- armnn::BackendOptions option("GpuAcc", {{"TuningLevel", atoi(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
- }
- else if (std::string(options_keys[i]) == std::string("gpu-mlgo-tuning-file"))
- {
- armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", std::string(options_values[i])}});
- optimizerOptions.m_ModelOptions.push_back(option);
- }
- else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
- {
- armnn::BackendOptions option("GpuAcc", {{"TuningFile", std::string(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
- }
- else if (std::string(options_keys[i]) == std::string("gpu-enable-profiling"))
- {
- runtimeOptions.m_EnableGpuProfiling = (*options_values[i] != '0');
- }
- else if (std::string(options_keys[i]) == std::string("gpu-kernel-profiling-enabled"))
- {
- armnn::BackendOptions option("GpuAcc", {{"KernelProfilingEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
- }
- else if (std::string(options_keys[i]) == std::string("save-cached-network"))
- {
- armnn::BackendOptions option("GpuAcc", {{"SaveCachedNetwork",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.m_ModelOptions.push_back(option);
- }
- else if (std::string(options_keys[i]) == std::string("cached-network-filepath"))
- {
- armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath", std::string(options_values[i])}});
- optimizerOptions.m_ModelOptions.push_back(option);
- }
- // Process GPU & CPU backend options
- else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
- {
- armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
-
- armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
- }
- // Process CPU backend options
- else if (std::string(options_keys[i]) == std::string("number-of-threads"))
- {
- unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
- armnn::BackendOptions modelOption("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
- optimizerOptions.m_ModelOptions.push_back(modelOption);
- }
- // Process reduce-fp32-to-fp16 option
- else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
- {
- optimizerOptions.m_ReduceFp32ToFp16 = armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Process debug-data
- else if (std::string(options_keys[i]) == std::string("debug-data"))
- {
- optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Infer output-shape
- else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
- {
- armnn::BackendOptions backendOption("ShapeInferenceMethod",
- {
- { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
- });
- optimizerOptions.m_ModelOptions.push_back(backendOption);
- }
- // Allow expanded dims
- else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
- {
- armnn::BackendOptions backendOption("AllowExpandedDims",
- {
- { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
- });
- optimizerOptions.m_ModelOptions.push_back(backendOption);
- }
- // Process memory-import
- else if (std::string(options_keys[i]) == std::string("memory-import"))
- {
- optimizerOptions.m_ImportEnabled = armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Process enable-internal-profiling
- else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
- {
- internalProfilingState = *options_values[i] != '0';
- optimizerOptions.m_ProfilingEnabled = internalProfilingState;
- }
- // Process internal-profiling-detail
- else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
- {
- uint32_t detailLevel = static_cast<uint32_t>(std::stoul(options_values[i]));
- switch (detailLevel)
- {
- case 1:
- internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
- break;
- case 2:
- internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsOnly;
- break;
- default:
- internalProfilingDetail = armnn::ProfilingDetailsMethod::Undefined;
- break;
- }
- }
- // Process enable-external-profiling
- else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
- {
- runtimeOptions.m_ProfilingOptions.m_EnableProfiling =
- armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Process timeline-profiling
- else if (std::string(options_keys[i]) == std::string("timeline-profiling"))
- {
- runtimeOptions.m_ProfilingOptions.m_TimelineEnabled = armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Process outgoing-capture-file
- else if (std::string(options_keys[i]) == std::string("outgoing-capture-file"))
- {
- runtimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile = options_values[i];
- }
- // Process incoming-capture-file
- else if (std::string(options_keys[i]) == std::string("incoming-capture-file"))
- {
- runtimeOptions.m_ProfilingOptions.m_IncomingCaptureFile = options_values[i];
- }
- // Process file-only-external-profiling
- else if (std::string(options_keys[i]) == std::string("file-only-external-profiling"))
- {
- runtimeOptions.m_ProfilingOptions.m_FileOnly = armnn::stringUtils::StringToBool(options_values[i]);
- }
- // Process counter-capture-period
- else if (std::string(options_keys[i]) == std::string("counter-capture-period"))
- {
- runtimeOptions.m_ProfilingOptions.m_CapturePeriod = static_cast<uint32_t>(std::stoul(options_values[i]));
- }
- // Process profiling-file-format
- else if (std::string(options_keys[i]) == std::string("profiling-file-format"))
- {
- runtimeOptions.m_ProfilingOptions.m_FileFormat = options_values[i];
- }
- // Process serialize-to-dot
- else if (std::string(options_keys[i]) == std::string("serialize-to-dot"))
- {
- this->SetSerializeToDot(options_values[i]);
- }
-
- // Process disable-tflite-runtime-fallback
- else if (std::string(options_keys[i]) == std::string("disable-tflite-runtime-fallback"))
- {
- this->DisableTfLiteRuntimeFallback(armnn::stringUtils::StringToBool(options_values[i]));
- }
- else
- {
- throw armnn::Exception("Unknown option for the ArmNN Delegate given: " + std::string(options_keys[i]));
- }
- }
-
- this->SetRuntimeOptions(runtimeOptions);
- this->SetOptimizerOptions(optimizerOptions);
- this->SetInternalProfilingParams(internalProfilingState, internalProfilingDetail);
-}
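-
-// A minimal usage sketch, not part of the delegate itself: the key/value strings below are
-// hypothetical examples of options this constructor understands.
-//
-//     const char* keys[]   = { "backends", "number-of-threads", "enable-fast-math" };
-//     const char* values[] = { "CpuAcc",   "4",                 "1" };
-//     armnnDelegate::DelegateOptions options(keys, values, 3, nullptr);
-//
-// This selects the CpuAcc backend, caps it at four threads and enables the fast-math model option.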
-} // namespace armnnDelegate
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
deleted file mode 100644
index 1aa9029271..0000000000
--- a/delegate/src/DelegateUtils.hpp
+++ /dev/null
@@ -1,639 +0,0 @@
-//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/BackendHelper.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/NumericCast.hpp>
-
-#include <armnnUtils/Permute.hpp>
-#include <armnnUtils/TensorUtils.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-namespace
-{
-
-// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
-try \
-{ \
- for (auto&& backendId : backends) \
- { \
- auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
- if (layerSupportObject.IsBackendRegistered()) \
- { \
- std::string reasonIfUnsupported; \
- supported = \
- layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
- if (supported) \
- { \
- setBackend = backendId; \
- break; \
- } \
- else \
- { \
- if (reasonIfUnsupported.size() > 0) \
- { \
- TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
- } \
- else \
- { \
- TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn", opName); \
- } \
- } \
- } \
- else \
- { \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
- } \
- } \
- if (!supported) \
- { \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
- } \
-} \
-catch (const armnn::InvalidArgumentException &e) \
-{ \
- throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
-}
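-// For example, FusedActivation() further down expands this as
-//     FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION", tfLiteContext, IsActivationSupported, data.m_Backends,
-//                                isSupported, setBackend, activationOutputInfo, activationOutputInfo,
-//                                activationDesc);
-// with the trailing arguments forwarded to each backend's IsActivationSupported query in turn.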
-
-TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- const unsigned int expectedSize,
- int nodeIndex)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (static_cast<unsigned int >(numInputs) != expectedSize)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
- numInputs, expectedSize, nodeIndex);
- return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- const unsigned int expectedSize,
- int nodeIndex)
-{
- auto numOutputs = tfLiteNode->outputs->size;
- if (static_cast<unsigned int >(numOutputs) != expectedSize)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
- numOutputs, expectedSize, nodeIndex);
- return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
-{
- auto tensorAllocationType = tfLiteTensor.allocation_type;
- if (tensorAllocationType == kTfLiteDynamic)
- {
- return true;
- }
- return false;
-}
-
-bool IsValid(const TfLiteTensor* tfLiteTensor)
-{
- return tfLiteTensor == nullptr ? false : true;
-}
-
-bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
-{
- if(!IsValid(&tfLiteTensor))
- {
- std::cout << "..Is Not Valid" << std::endl;
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return false;
- }
- if (IsDynamicTensor(tfLiteTensor))
- {
- std::cout << "..IsDynamicTensor" << std::endl;
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return false;
- }
- return true;
-}
-
-uint32_t NonNegative(int32_t value, int nodeIndex)
-{
- if (value < 0)
- {
-        throw armnn::Exception(
-                "TfLiteArmnnDelegate: Negative value given where a non-negative value was expected in node " +
-                std::to_string(static_cast<int>(nodeIndex)));
- }
- else
- {
- return static_cast<uint32_t>(value);
- }
-}
-
-bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
-{
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- return true;
- }
- return false;
-}
-
-TfLiteStatus Connect(armnn::IConnectableLayer* layer,
- TfLiteNode* tfLiteNode,
- armnnDelegate::DelegateData& data)
-{
- ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
-
- // Connect the input slots
- for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
- {
- if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
- {
- data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
- }
- }
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
-
- return kTfLiteOk;
-}
-
-void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
- armnn::TensorInfo& inputInfo1)
-{
- unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
- unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
-
- if (inputDimensions0 == inputDimensions1)
- {
- return;
- }
-
- unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
-
- bool input0IsSmaller = inputDimensions0 < inputDimensions1;
- armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
- const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
-
- smallInfo.SetShape(newShape);
-
-}
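-// For example, pairing a [3, 4] tensor with a [2, 3, 3, 4] tensor expands the smaller info to
-// [1, 1, 3, 4] (assuming armnnUtils::ExpandDimsToRank pads with leading 1 dimensions), so both
-// inputs present the same rank for broadcasting.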
-
-TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- TfLiteFusedActivation activationType,
- armnn::IConnectableLayer* prevLayer,
- unsigned int outputSlotIndex,
- armnnDelegate::DelegateData& data)
-{
-
- const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
-
- armnn::ActivationDescriptor activationDesc;
-
- switch (activationType)
- {
- case kTfLiteActNone:
- {
- // No Activation
- return kTfLiteOk;
- }
- case kTfLiteActRelu:
- {
- activationDesc.m_Function = armnn::ActivationFunction::ReLu;
- break;
- }
-// The name of kTfLiteActRelu1 changed after TF Lite v2.3
-#if defined(ARMNN_POST_TFLITE_2_3)
- case kTfLiteActReluN1To1:
-#else
- case kTfLiteActRelu1:
-#endif
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = -1.0f;
- break;
- }
- case kTfLiteActRelu6:
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 6.0f;
- activationDesc.m_B = 0.0f;
- break;
- }
- case kTfLiteActSigmoid:
- {
- activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
- break;
- }
- case kTfLiteActTanh:
- {
- activationDesc.m_Function = armnn::ActivationFunction::TanH;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = 1.0f;
- break;
- }
- default:
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
- tfLiteContext,
- IsActivationSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- activationOutputInfo,
- activationOutputInfo,
- activationDesc);
- if (!isSupported)
- {
- return kTfLiteError;
- }
-    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
-    ARMNN_ASSERT(activationLayer != nullptr);
-    activationLayer->SetBackendId(setBackend);
-
- activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
-
- // Connect and prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
- {
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
- armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
- return kTfLiteOk;
-}
-
-armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- armnn::IConnectableLayer* prevLayer,
- armnn::TensorInfo reshapedOutputTensorInfo,
- armnn::TensorInfo outputTensorInfo,
- armnnDelegate::DelegateData& data)
-{
- armnn::ReshapeDescriptor desc;
- desc.m_TargetShape = outputTensorInfo.GetShape();
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- reshapedOutputTensorInfo,
- outputTensorInfo,
- desc);
-
- if (!isSupported)
- {
- return nullptr;
- }
-
-    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
-    ARMNN_ASSERT(reshapeLayer != nullptr);
-    reshapeLayer->SetBackendId(setBackend);
-
- prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- // Connect and prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
- {
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
- armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
- data.m_OutputSlotForNode[static_cast<unsigned long>(
- tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
- return reshapeLayer;
-}
-
-armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
-{
- switch (tfLiteTensor.type)
- {
- case kTfLiteBool:
- return armnn::DataType::Boolean;
- case kTfLiteFloat32:
- return armnn::DataType::Float32;
- case kTfLiteFloat16:
- return armnn::DataType::Float16;
- case kTfLiteUInt8:
- return armnn::DataType::QAsymmU8;
- case kTfLiteInt8:
- {
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- auto* quantization =
- reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
- if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
- {
- return armnn::DataType::QAsymmS8;
- }
- else
- {
- return armnn::DataType::QSymmS8;
- }
- }
- else
- {
- return armnn::DataType::QAsymmS8;
- }
- }
- case kTfLiteInt16:
- return armnn::DataType::QSymmS16;
- case kTfLiteInt32:
- return armnn::DataType::Signed32;
- case kTfLiteInt64:
- return armnn::DataType::Signed64;
- default:
- throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
- }
-}
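-// For example, an int8 tensor whose affine quantization carries a single zero point maps to
-// QAsymmS8, while int8 weights quantized per channel (one zero point per channel) map to QSymmS8.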
-
-armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
-{
- armnn::DataType type = GetDataType(tfLiteTensor);
- armnn::TensorInfo ret;
- auto tensorDimensionSize = tfLiteTensor.dims->size;
- if (tensorDimensionSize == 0)
- {
-        // If the input tensor does not have a shape, assume it is a 1D tensor with a single element
-        // (outputs without a shape are instead marked as having an unspecified dimensionality)
- if (!isOutput)
- {
- std::vector<unsigned int> safeShape = { 1 };
- bool dimensionsSpecificity[1] = { true };
- armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
- safeShape.data(),
- dimensionsSpecificity);
- ret = armnn::TensorInfo(tensorShape, type);
- if(tflite::IsConstantTensor(&tfLiteTensor))
- {
- ret.SetConstant(true);
- }
- }
- else
- {
- armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
- ret = armnn::TensorInfo(tensorShape, type);
- }
- }
- else
- {
- std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
- bool dimensionsSpecificity[5] = { true, true, true, true, true };
- for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
- auto dim = tfLiteTensor.dims->data[i];
- if (dim == 0)
- {
- dimensionsSpecificity[i] = false;
- }
- tensorDims[i] = static_cast<unsigned int>(dim);
- }
- armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
- tensorDims.data(),
- dimensionsSpecificity);
-
- if(tflite::IsConstantTensor(&tfLiteTensor))
- {
- ret = armnn::TensorInfo(tensorShape, type);
- ret.SetConstant(true);
- }
- else
- {
- ret = armnn::TensorInfo(tensorShape, type);
- }
- }
-
- auto quantizationInfo = tfLiteTensor.quantization;
- if (quantizationInfo.type == kTfLiteAffineQuantization)
- {
- // get per-channel quantization parameters
- const auto* affineQuantization =
- reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
- if (affineQuantization->scale->size > 1)
- {
- std::vector<float> quantizationScales;
- for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
- {
- quantizationScales.push_back(affineQuantization->scale->data[i]);
- }
- ret.SetQuantizationScales(quantizationScales);
- ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
- }
- else
- {
- ret.SetQuantizationScale(affineQuantization->scale->data[0]);
- ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
- }
- }
- else
- {
- auto quantizationParameters = tfLiteTensor.params;
- ret.SetQuantizationScale(quantizationParameters.scale);
- ret.SetQuantizationOffset(quantizationParameters.zero_point);
- }
-
- return ret;
-}
-
-armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
- const armnn::TensorInfo& tensorInfo)
-{
- if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
- {
- throw armnn::Exception(
- "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
- }
-
- return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
-}
-
-armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
-{
- const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
- armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
- return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
-}
-
-void CalcPadding(uint32_t inputSize,
- uint32_t filterSize,
- uint32_t stride,
- uint32_t dilation,
- uint32_t& paddingFront,
- uint32_t& paddingBack,
- TfLitePadding padding)
-{
- paddingFront = 0;
- paddingBack = 0;
- if (padding == kTfLitePaddingSame)
- {
- uint32_t outputSize = (inputSize + stride - 1) / stride;
- uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
- uint32_t temp = (outputSize - 1) * stride + dilatedSize;
- if (temp > inputSize)
- {
- paddingFront = (temp - inputSize) / 2;
- paddingBack = (temp - inputSize) - paddingFront;
- }
- }
-}
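-// Worked example for kTfLitePaddingSame: inputSize = 5, filterSize = 3, stride = 2, dilation = 1
-// gives outputSize = 3, dilatedSize = 3 and temp = (3 - 1) * 2 + 3 = 7; the 2 surplus elements are
-// split as paddingFront = 1 and paddingBack = 1. For kTfLitePaddingValid both paddings remain 0.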
-
-TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
- const armnn::TensorInfo& constTensorInfo,
- TfLiteContext* tfLiteContext,
- const TfLiteTensor& tfLiteTensor,
- armnnDelegate::DelegateData& data,
- unsigned int slotIndex)
-{
- IgnoreUnused(layer);
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
- tfLiteContext,
- IsConstantSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- constTensorInfo);
- if (!isSupported)
- {
- return kTfLiteError;
- }
-
- auto constantInput = CreateConstTensor(&tfLiteTensor,
- constTensorInfo);
- armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
- constantLayer->SetBackendId(setBackend);
- armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(constTensorInfo);
-
- data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;
-
- return kTfLiteOk;
-}
-
-bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
-{
-    // The optional input is not present if the inputs array does not reach operandIndex, or if the entry at
-    // operandIndex is negative (TfLite uses -1 to mark omitted optional inputs).
- if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
- {
- return true;
- }
- return false;
-}
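-// For example, a convolution node whose inputs array is { 0, 1, -1 } reports the optional bias at
-// operandIndex 2 as absent, whereas inputs { 0, 1, 2 } report it as present.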
-
-TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
- armnnDelegate::DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode)
-{
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- // Process input tensors
- // If input tensor is a Constant tensor create a constant layer and connect it to the network
- for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
- {
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
- if (tflite::IsConstantTensor(&tfLiteInputTensor))
- {
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- bool isSupported = false;
- armnn::BackendId setBackend;
- FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
- tfLiteContext,
- IsConstantSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo);
- if (!isSupported)
- {
- return kTfLiteError;
- }
- auto constantInput = CreateConstTensor(&tfLiteInputTensor,
- inputTensorInfo);
- armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
- constantLayer->SetBackendId(setBackend);
- armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(inputTensorInfo);
-
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
- }
- }
- return kTfLiteOk;
-}
-
-unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
-{
- int numDims = armnn::numeric_cast<int>(numDimensions);
- int wrappedIndex = index < 0 ? numDims + index : index;
- ARMNN_ASSERT(wrappedIndex >= 0);
- ARMNN_ASSERT(wrappedIndex < numDims);
-
- return static_cast<unsigned int>(wrappedIndex);
-}
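-// For example, with numDimensions = 4 an axis of -1 wraps to 3, while an axis of 2 is returned unchanged.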
-
-bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
- (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
- (armnn::DataType::Signed32 == outputInfo.GetDataType());
-}
-
-void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
-{
- // If input tensor info is constant and output tensor info shape is not specified
- // set the output shape from input shape
- if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
- {
- outputInfo.SetShape(inputInfo.GetShape());
- }
- return;
-}
-
-} // namespace anonymous
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
deleted file mode 100644
index fa9021b5c1..0000000000
--- a/delegate/src/ElementwiseBinary.hpp
+++ /dev/null
@@ -1,401 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include "MultiLayerFacade.hpp"
-#include "SharedFunctions.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include "tensorflow/lite/delegates/utils.h"
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("ADD",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputInfo,
- armnn::BinaryOperation::Add);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-
-TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("DIV",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputTensorInfo,
- armnn::BinaryOperation::Div);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- // need first to validate that the div operator is supported
- // then that the floor operator is supported
- TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
- if (status != kTfLiteOk)
- {
- return status;
- }
- // if the inputs and output of the div are all Signed32 we don't need to add the floor operator afterward.
- if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
- {
- return status;
- }
- // in case broadcasting is being done from one of the inputs to the div
- // choose the full sized input tensor to pass to the floor validation routine
- armnn::TensorInfo floorInputInfo = inputInfo1;
- if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
- {
- floorInputInfo = inputInfo2;
- }
- status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
- return status;
-}
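-// For example, FLOOR_DIV on float inputs 7.0f and 2.0f is lowered to Div (3.5f) followed by Floor
-// (3.0f); when all tensors are Signed32 the Div output is already integral, so the Floor layer and
-// its validation are skipped, mirroring the early return above.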
-
-TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputTensorInfo,
- armnn::BinaryOperation::Maximum);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputTensorInfo,
- armnn::BinaryOperation::Minimum);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("MUL",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputTensorInfo,
- armnn::BinaryOperation::Mul);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo1,
- const armnn::TensorInfo& inputInfo2,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("SUB",
- tfLiteContext,
- IsElementwiseBinarySupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo1,
- inputInfo2,
- outputTensorInfo,
- armnn::BinaryOperation::Sub);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
- DelegateData& delegateData,
- const armnn::TensorInfo& outputTensorInfo)
-{
- armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
- // if the output of the div is Signed32 the Floor layer is not required
- if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
- {
- return std::make_pair(divisionLayer, divisionLayer);
- }
- armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
- armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
- outputSlot.Connect(floorLayer->GetInputSlot(0));
- return std::make_pair(divisionLayer, floorLayer);
-}
-
-TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t elementwiseBinaryOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor0))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- elementwiseBinaryOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (IsDynamicTensor(tfLiteInputTensor1))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- elementwiseBinaryOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- elementwiseBinaryOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
-
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Check if we need to expand the dims of the input tensor infos.
- // This is required for a few of the backends.
- if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
- {
- ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
- }
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType = kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
- }
-
- if (!delegateData.m_Network)
- {
- switch(elementwiseBinaryOperatorCode)
- {
- case kTfLiteBuiltinAdd:
- return ValidateAddOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinDiv:
- return ValidateDivOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinFloorDiv:
- return ValidateFloorDivOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinMaximum:
- return ValidateMaximumOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinMinimum:
- return ValidateMinimumOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinMul:
- return ValidateMulOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- case kTfLiteBuiltinSub:
- return ValidateSubOperator(delegateData,
- tfLiteContext,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo);
- default:
- return kTfLiteError;
- }
- }
-
- armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
- MultiLayerFacade multiLayer;
- switch(elementwiseBinaryOperatorCode)
- {
- case kTfLiteBuiltinAdd:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Add);
- break;
- case kTfLiteBuiltinDiv:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
- break;
- case kTfLiteBuiltinFloorDiv:
- {
- auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
- multiLayer.AssignValues(layers.first, layers.second);
- elementwiseBinaryLayer = &multiLayer;
- }
- break;
- case kTfLiteBuiltinMaximum:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Maximum);
- break;
- case kTfLiteBuiltinMinimum:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Minimum);
- break;
- case kTfLiteBuiltinMul:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Mul);
- break;
- case kTfLiteBuiltinSub:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Sub);
- break;
- default:
- return kTfLiteError;
- }
- ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
- armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- if (!tfLiteNodeParameters)
- {
- // No Activation
- return kTfLiteOk;
- }
- // Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
-}
-
-} // namespace armnnDelegate
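Every visit function in these files follows the same two-phase shape: when delegateData.m_Network is null the call only queries backend support, and on the second pass it adds the Arm NN layer and wires it up. A minimal sketch of that shared control flow, modelled on the Add case above (this is a fragment, not a complete visit function; the tensor-info setup and the validateFunc lambda are assumed to exist as they do in the surrounding files):

    if (!delegateData.m_Network)
    {
        // Validation pass: ask the backends whether the layer would be supported, then return.
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }
    // Build pass: add the layer, publish the output TensorInfo, then connect inputs and outputs.
    armnn::IConnectableLayer* layer =
        delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
    return Connect(layer, tfLiteNode, delegateData);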
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
deleted file mode 100644
index 4be6fba82e..0000000000
--- a/delegate/src/ElementwiseUnary.hpp
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <armnn/utility/Assert.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- armnn::UnaryOperation unaryOperation)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
- tfLiteContext,
- IsElementwiseUnarySupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
deleted file mode 100644
index e79133e15c..0000000000
--- a/delegate/src/Fill.hpp
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitFillOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteFillOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- switch(tfLiteFillOperatorCode)
- {
- case kTfLiteBuiltinFill:
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- break;
- default:
- return kTfLiteError;
- }
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteFillOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteFillTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteFillTensor, tfLiteFillOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteFillOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::FillDescriptor descriptor;
- switch (tfLiteFillTensor.type)
- {
- case kTfLiteFloat32:
- descriptor.m_Value = tflite::GetTensorData<float>(&tfLiteFillTensor)[0];
- break;
- case kTfLiteInt32:
- descriptor.m_Value = tflite::GetTensorData<int32_t>(&tfLiteFillTensor)[0];
- break;
- default:
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: FILL value data type is not supported in operator #%d node #%d: ",
- tfLiteFillOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("FILL",
- tfLiteContext,
- IsFillSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(layer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
deleted file mode 100644
index 1129951104..0000000000
--- a/delegate/src/FullyConnected.hpp
+++ /dev/null
@@ -1,275 +0,0 @@
-//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include "armnnUtils/TensorUtils.hpp"
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteWeightsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteWeightsTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Check that we support fused activation before we attempt to create a layer
- auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data);
-    TfLiteFusedActivation activationType = kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
- }
-
- // Fully Connected Layer accepts two dimensional weights input
- int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
- if (weightsDimension != 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dimension #$d for Fully Connected weights is not supported by Armnn"
- " in operator #%d node #%d: ", weightsDimension, operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- armnn::TensorInfo biasTensorInfo;
- if (biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
- }
- else
- {
- biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
- }
-
- armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- if (inputTensorInfo.GetNumDimensions() > 2)
- {
- // Calculate reshape to flatten to 2D [batch_size, input_size]
- std::vector<unsigned int> reshapedDimensions(2);
- reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
- reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
-
- if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
- reshapedDimensions[1], operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
- }
- armnn::TensorInfo reshapedOutputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
-
- if (outputTensorInfo.GetNumDimensions() > 2)
- {
- // Calculate reshape to flatten to 2D [batch_size, input_size]
- std::vector<unsigned int> reshapedDimensions(2);
- reshapedDimensions[1] = weightsTensorInfo.GetShape()[0];
- reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
-
- if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Failed to deduce output tensor shape from filter size #%d #%d node #%d: ",
- reshapedDimensions[1], operatorCode, nodeIndex);
- return kTfLiteError;
- }
- reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
- }
-
- armnn::FullyConnectedDescriptor descriptor;
- descriptor.m_TransposeWeightMatrix = true;
- descriptor.m_BiasEnabled = biasEnabled;
- descriptor.m_ConstantWeights = weightsTensorInfo.IsConstant();
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
-
- FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
- tfLiteContext,
- IsFullyConnectedSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- reshapedTensorInfo,
- outputTensorInfo,
- weightsTensorInfo,
- biasTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(reshapedOutputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- // Add a constant layer for weights and biases if inputs are constant.
- if (weightsTensorInfo.IsConstant())
- {
- auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
- weightsTensorInfo);
-
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
-
- weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
- }
-
- if (biasEnabled)
- {
- const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(biasTensorInfo.IsConstant())
- {
- auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
- biasTensorInfo);
-
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
- ARMNN_ASSERT(biasLayer != nullptr);
-
- biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
- }
- }
-
- // The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
- {
- auto input =
- CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
- inputTensorInfo);
-
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
- inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- armnn::IConnectableLayer* reshapeLayer = nullptr;
- if (inputTensorInfo.GetNumDimensions() > 2)
- {
- // Add reshape to flatten to 2D [batch_size, input_size]
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
- reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
- ARMNN_ASSERT(reshapeLayer != nullptr);
-
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
-
- // Connect
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
-
- if (!descriptor.m_ConstantWeights)
- {
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(1));
- }
-
- if (biasEnabled && !biasTensorInfo.IsConstant())
- {
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(2));
- }
- delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[0]] = &outputSlot;
- }
-
- if (reshapeLayer == nullptr)
- {
- if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
- }
-
- if (outputTensorInfo.GetNumDimensions() > 2)
- {
- layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
- delegateData);
- if (!layer)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Failed to add reshape for FullyConnected #%d node #%d: ",
- operatorCode,
- nodeIndex);
- return kTfLiteError;
- }
- }
-
- if (!tfLiteNodeParameters)
- {
- // No Activation
- return kTfLiteOk;
- }
-
- // Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
-}
-
-} // namespace armnnDelegate \ No newline at end of file
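The flattening above reduces any input with more than two dimensions to [batch_size, input_size], where input_size comes from the second dimension of the weights. A small standalone sketch of the same arithmetic, using made-up example shapes rather than values taken from this code:

    #include <cstdio>

    int main()
    {
        // Hypothetical shapes: input [2, 2, 6] (24 elements), weights [units, 12].
        unsigned int inputNumElements = 2u * 2u * 6u;                  // 24
        unsigned int inputSize        = 12u;                           // weights shape[1]
        unsigned int batchSize        = inputNumElements / inputSize;  // 2
        // The delegate rejects the node when inputNumElements % inputSize != 0.
        std::printf("reshaped input: [%u, %u]\n", batchSize, inputSize);
        return 0;
    }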
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
deleted file mode 100644
index 9125997417..0000000000
--- a/delegate/src/Gather.hpp
+++ /dev/null
@@ -1,106 +0,0 @@
-//
-// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
-namespace armnnDelegate
-{
-TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- auto* gatherParameters = reinterpret_cast<TfLiteGatherParams*>(tfLiteNode->builtin_data);
- auto axis = gatherParameters->axis;
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
- armnn::GatherDescriptor gatherDescriptor;
- gatherDescriptor.m_Axis = axis;
-
- auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
- auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
- auto outputDimensions = outputTensorInfo.GetNumDimensions();
- if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
- {
- TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext,
-                                  "TfLiteArmnnDelegate: Operation has invalid axis: %d. It is out of bounds [-%d, %d)",
- axis, inputDimensions, inputDimensions);
- return kTfLiteError;
- }
- if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext,
- "Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
- outputDimensions, inputDimensions, indicesDimensions);
- return kTfLiteError;
- }
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- // Check if supported
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("GATHER",
- tfLiteContext,
- IsGatherSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- indicesTensorInfo,
- outputTensorInfo,
- gatherDescriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(layer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- return Connect(layer, tfLiteNode, delegateData);
-}
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
deleted file mode 100644
index cf526e1995..0000000000
--- a/delegate/src/GatherNd.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
-
-namespace armnnDelegate
-{
-TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteIndicesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteIndicesTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- // Check if supported
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("GATHER_ND",
- tfLiteContext,
- IsGatherNdSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- indicesTensorInfo,
- outputTensorInfo);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(layer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- return Connect(layer, tfLiteNode, delegateData);
-}
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
deleted file mode 100644
index d71618ee9c..0000000000
--- a/delegate/src/LogicalBinary.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t logicalOperatorCode,
- armnn::LogicalBinaryOperation binaryOperation)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor0, logicalOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor1, logicalOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, logicalOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Check if we need to expand the dims of any of the input tensor infos.
- // This is required for a few of the backends.
- if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
- {
- ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
- }
-
- // Setup descriptor and assign operation
- armnn::LogicalBinaryDescriptor desc;
- desc.m_Operation = binaryOperation;
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
- tfLiteContext,
- IsLogicalBinarySupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo0,
- inputTensorInfo1,
- outputTensorInfo,
- desc);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
- logicalBinaryLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(logicalBinaryLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
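As in ElementwiseBinary.hpp, ExpandTensorRankToEqual is used here to give both inputs the same rank before broadcasting; for example, broadcasting a [3, 4] tensor against a [2, 3, 4] tensor would first expand the lower-rank shape to [1, 3, 4] (illustrative shapes, assuming the helper pads the smaller rank with leading 1s).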
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
deleted file mode 100644
index 8c1f877ec9..0000000000
--- a/delegate/src/Lstm.hpp
+++ /dev/null
@@ -1,268 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <armnn/LstmParams.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
-
- const auto nodeParams = reinterpret_cast<TfLiteLSTMParams*>(tfLiteNode->builtin_data);
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- // Set the params structure for the AddLstmLayer call
- armnn::LstmInputParams params;
-
- if (IsOptionalOperandPresent(tfLiteNode, 1))
- {
- params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
- }
-
- params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
- params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
- params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
-
- // Recurrent weight tensors of size {n_cell, n_output}
- if (IsOptionalOperandPresent(tfLiteNode, 5))
- {
- params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
- }
-
- params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
- params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
- params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
-
- // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
- if (IsOptionalOperandPresent(tfLiteNode, 9))
- {
- params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 10))
- {
- params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 11))
- {
- params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
- }
-
- // Gates bias tensors of size {n_cell}
- if (IsOptionalOperandPresent(tfLiteNode, 12))
- {
- params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
- }
-
- params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
- params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
- params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
-
- // Projection weight tensor of size {n_output, n_cell}
- if (IsOptionalOperandPresent(tfLiteNode, 16))
- {
- params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
- }
- // Projection bias tensor of size {n_output}
- if (IsOptionalOperandPresent(tfLiteNode, 17))
- {
- params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
- }
-
- // These state tensors are defined as variable tensors, and will be modified by this op.
- armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[18]]);
- armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
-
- // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
- if (IsOptionalOperandPresent(tfLiteNode, 20))
- {
- params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 21))
- {
- params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 22))
- {
- params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 23))
- {
- params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
- }
-
- // set the layer descriptor
- armnn::LstmDescriptor desc;
- desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
- desc.m_ClippingThresCell = nodeParams->cell_clip;
- desc.m_ClippingThresProj = nodeParams->proj_clip;
- desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
- || params.m_RecurrentToInputWeights == nullptr
- || params.m_InputGateBias == nullptr);
- desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
- desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
- desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
- || params.m_ForgetLayerNormWeights != nullptr
- || params.m_CellLayerNormWeights != nullptr
- || params.m_OutputLayerNormWeights != nullptr);
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- unsigned int batchSize = inputTensorInfo.GetShape()[0];
- unsigned int outputSize = outputTensorInfo.GetShape()[1];
- unsigned int numUnits = cellStateInInfo.GetShape()[1];
-
- armnn::DataType dataType = inputTensorInfo.GetDataType();
- float qScale = inputTensorInfo.GetQuantizationScale();
- float qOffset = inputTensorInfo.GetQuantizationOffset();
-
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
- if (!desc.m_CifgEnabled)
- {
- scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
- }
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
-
- armnn::LstmInputParamsInfo paramsInfo;
- paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
- paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
- paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
- paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
- paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
- paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
- paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
- paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
- paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
-
- if (!desc.m_CifgEnabled)
- {
- paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
- paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
- if (params.m_CellToInputWeights != nullptr)
- {
- paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
- }
- paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
- }
-
- if (desc.m_ProjectionEnabled)
- {
- paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
- if (params.m_ProjectionBias != nullptr)
- {
- paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
- }
- }
-
- if (desc.m_PeepholeEnabled)
- {
- paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
- paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
- }
-
- if (desc.m_LayerNormEnabled)
- {
- if(!desc.m_CifgEnabled)
- {
- paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
- }
- paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
- paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
- paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("LSTM",
- tfLiteContext,
- IsLstmSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputStateInInfo,
- cellStateInInfo,
- scratchBufferTensorInfo,
- outputStateOutTensorInfo,
- cellStateOutTensorInfo,
- outputInfo,
- desc,
- paramsInfo);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
- layer->GetOutputSlot(1).SetTensorInfo(outputStateOutTensorInfo);
- layer->GetOutputSlot(2).SetTensorInfo(cellStateOutTensorInfo);
- layer->GetOutputSlot(3).SetTensorInfo(outputTensorInfo);
-
- // Connect the inputs
- // input_layer
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(0));
- // cellStateIn
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[18]]->Connect(layer->GetInputSlot(1));
- //outputStateIn
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[19]]->Connect(layer->GetInputSlot(2));
-
-    // The TfLite LSTM operator in this context has only one output; map it to Arm NN output slot 1 (the output state out)
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(1);
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
- return kTfLiteOk;
-}
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
deleted file mode 100644
index 31a7354382..0000000000
--- a/delegate/src/MultiLayerFacade.hpp
+++ /dev/null
@@ -1,136 +0,0 @@
-//
-// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-// NOTE: the MultiLayerFacade class is a utility class which makes a chain
-//       of operators look like a single IConnectableLayer with the first
-//       layer in the chain supplying the input slots and the last supplying
-//       the output slots. It enables us, for example, to simulate a
-//       TensorFlow Lite FloorDiv operator by chaining a Div layer followed
-//       by a Floor layer and passing them as a single unit to the code that
-//       connects up the graph as the delegate proceeds to build up the
-//       Arm NN subgraphs.
-//
-
-#include <common/include/ProfilingGuid.hpp>
-#include <armnn/INetwork.hpp>
-
-namespace armnnDelegate
-{
-
-class MultiLayerFacade : public armnn::IConnectableLayer
-{
-public:
- MultiLayerFacade() :
- m_FirstLayer(nullptr), m_LastLayer(nullptr) {}
-
- MultiLayerFacade(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer) :
- m_FirstLayer(firstLayer), m_LastLayer(lastLayer) {}
-
- MultiLayerFacade(const MultiLayerFacade& obj) :
- m_FirstLayer(obj.m_FirstLayer), m_LastLayer(obj.m_LastLayer) {}
-
- ~MultiLayerFacade() {} // we don't own the pointers
-
- MultiLayerFacade& operator=(const MultiLayerFacade& obj)
- {
- m_FirstLayer = obj.m_FirstLayer;
- m_LastLayer = obj.m_LastLayer;
- return *this;
- }
-
- void AssignValues(armnn::IConnectableLayer* firstLayer, armnn::IConnectableLayer* lastLayer)
- {
- m_FirstLayer = firstLayer;
- m_LastLayer = lastLayer;
- }
-
- virtual const char* GetName() const override
- {
- return m_FirstLayer->GetName();
- }
-
- virtual unsigned int GetNumInputSlots() const override
- {
- return m_FirstLayer->GetNumInputSlots();
- }
-
- virtual unsigned int GetNumOutputSlots() const override
- {
- return m_LastLayer->GetNumOutputSlots();
- }
-
- virtual const armnn::IInputSlot& GetInputSlot(unsigned int index) const override
- {
- return m_FirstLayer->GetInputSlot(index);
- }
-
- virtual armnn::IInputSlot& GetInputSlot(unsigned int index) override
- {
- return m_FirstLayer->GetInputSlot(index);
- }
-
- virtual const armnn::IOutputSlot& GetOutputSlot(unsigned int index) const override
- {
- return m_LastLayer->GetOutputSlot(index);
- }
-
- virtual armnn::IOutputSlot& GetOutputSlot(unsigned int index) override
- {
- return m_LastLayer->GetOutputSlot(index);
- }
-
- virtual std::vector<armnn::TensorShape> InferOutputShapes(
- const std::vector<armnn::TensorShape>& inputShapes) const override
- {
-        // NOTE: this function is not expected to be used. If it is, it may need to be
-        //       overridden for particular sequences of operators.
- return m_FirstLayer->InferOutputShapes(inputShapes);
- }
-
- virtual LayerGuid GetGuid() const override
- {
- return m_FirstLayer->GetGuid();
- }
-
- virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
- {
-        // This function is not expected to be used, so no implementation is provided.
-        // If one were required and the chain contained more than two operators, we would
-        // need a way to record the intermediate layers so that they could be visited.
-        // The same applies to BackendSelectionHint below.
- }
-
- virtual void BackendSelectionHint(armnn::Optional<armnn::BackendId> backend) override
- {
-        // This function is not expected to be used, so no implementation is provided.
- }
-
- virtual armnn::LayerType GetType() const override
- {
- return m_FirstLayer->GetType();
- }
-
- virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
-
- void SetBackendId(const armnn::BackendId& id) override {}
-
-protected:
- /// Retrieve the handles to the constant values stored by the layer.
- /// @return A vector of the constant tensors stored by this layer.
- ConstantTensors GetConstantTensorsByRef() override { return {}; }
- ImmutableConstantTensors GetConstantTensorsByRef() const override { return {}; }
-
-private:
- armnn::IConnectableLayer* m_FirstLayer;
- armnn::IConnectableLayer* m_LastLayer;
-
- // to satisfy the GetParameters method need to hand back a NullDescriptor
- armnn::NullDescriptor m_NullDescriptor;
-};
-
-} // namespace armnnDelegate
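The facade is what lets ElementwiseBinary.hpp above present a Div layer followed by a Floor layer as one unit for FLOOR_DIV. A minimal usage sketch, assuming 'network' is an armnn::INetwork* and 'outputTensorInfo' has already been computed (these variable names are illustrative, not taken from the delegate code):

    armnn::IConnectableLayer* divLayer   = network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Div);
    armnn::IConnectableLayer* floorLayer = network->AddFloorLayer();
    divLayer->GetOutputSlot(0).Connect(floorLayer->GetInputSlot(0));

    // Wrap the chain: input slots resolve to the Div layer, output slots to the Floor layer.
    armnnDelegate::MultiLayerFacade facade;
    facade.AssignValues(divLayer, floorLayer);
    facade.GetOutputSlot(0).SetTensorInfo(outputTensorInfo);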
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
deleted file mode 100644
index ef2e524369..0000000000
--- a/delegate/src/Normalization.hpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::L2NormalizationDescriptor descriptor;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
- tfLiteContext,
- IsL2NormalizationSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a L2Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-
-TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t normalizationOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, normalizationOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, normalizationOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::NormalizationDescriptor descriptor;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
- descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
- descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
-
- auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(tfLiteNode->builtin_data);
- descriptor.m_NormSize = params->radius;
- descriptor.m_K = params->bias;
- descriptor.m_Alpha = params->alpha;
- descriptor.m_Beta = params->beta;
-
- // ArmNN expects normSize to be the full size of the normalization window
- descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
- tfLiteContext,
- IsNormalizationSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
deleted file mode 100644
index 57d3b460f5..0000000000
--- a/delegate/src/Pack.hpp
+++ /dev/null
@@ -1,122 +0,0 @@
-//
-// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitPackOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- unsigned int numInputs = tfLiteNode->inputs->size;
- if (numInputs < 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Must have at least one input in (%d != %d) in node #%d",
- 1, numInputs, nodeIndex);
- return kTfLiteError;
- }
-
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- // Validate all inputs and get TensorInfo
- std::vector<armnn::TensorInfo> inputTensorInfos;
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- inputTensorInfos.emplace_back(inputTensorInfo);
- }
-
- // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
- std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
- std::transform(inputTensorInfos.begin(),
- inputTensorInfos.end(),
- std::back_inserter(inputConstTensorInfos),
- [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
-
- // Validate output and get TensorInfo
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::StackDescriptor desc;
- desc.m_NumInputs = static_cast<uint32_t>(numInputs);
-
- // Get axis from TfLite parameters
- auto* params = reinterpret_cast<TfLitePackParams*>(tfLiteNode->builtin_data);
- desc.m_Axis = static_cast<uint32_t>(params->axis);
-
- // Use the tensor shape of the first input as the "correct" input shape in the descriptor
- desc.m_InputShape = inputTensorInfos[0].GetShape();
-
- // Check if supported
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("STACK",
- tfLiteContext,
- IsStackSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputConstTensorInfos,
- outputTensorInfo,
- desc);
- };
-
-    // If m_Network is a nullptr, this call is only validating whether the operator is supported.
-    // If it is supported, VisitPackOperator will be called again to add the layer to the network, as seen below.
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // The TfLite Pack operator is equivalent to the ArmNN Stack operator
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- // Connect the Constant Inputs
- auto inputsTensorsProcess = ProcessInputs(layer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
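In other words, a TfLite PACK of N inputs maps directly onto an Arm NN Stack descriptor: m_NumInputs = N, m_Axis taken from the TfLite parameters, and m_InputShape from the first input. For example, packing three [2, 3] tensors along axis 0 would produce a [3, 2, 3] output (illustrative shapes only).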
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
deleted file mode 100644
index 2ecf2a06d7..0000000000
--- a/delegate/src/Pad.hpp
+++ /dev/null
@@ -1,179 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitPadOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLitePadOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- switch(tfLitePadOperatorCode)
- {
- case kTfLiteBuiltinMirrorPad:
- case kTfLiteBuiltinPad:
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- break;
- case kTfLiteBuiltinPadv2:
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
- break;
- default:
- return kTfLiteError;
- }
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- const TfLiteTensor& tfLitepaddingTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
-
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Get the padding data from the input tensor
- auto* paddingData = tflite::GetTensorData<int32_t>(&tfLitepaddingTensor);
-
- size_t step = 2;
- armnn::PadDescriptor descriptor;
- for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
- {
- descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
- }
-
- if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
- {
- descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
- }
- else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
- {
- const TfLiteTensor& tfLitepaddingValue = tfLiteTensors[tfLiteNode->inputs->data[2]];
- armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingValue);
- if (paddingValueTensorInfo.GetNumElements() != 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Multiple padding value are not supported in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- // Get the padding value from the input tensor
- switch (tfLitepaddingValue.type)
- {
- case kTfLiteFloat32:
- descriptor.m_PadValue = tflite::GetTensorData<float>(&tfLitepaddingValue)[0];
- break;
- case kTfLiteUInt8:
- descriptor.m_PadValue = tflite::GetTensorData<uint8>(&tfLitepaddingValue)[0];
- break;
- case kTfLiteInt8:
- descriptor.m_PadValue = tflite::GetTensorData<int8>(&tfLitepaddingValue)[0];
- break;
- default:
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- }
- else if (tfLitePadOperatorCode == kTfLiteBuiltinMirrorPad)
- {
- TfLiteMirrorPaddingParams* options = reinterpret_cast<TfLiteMirrorPaddingParams*>(tfLiteNode->builtin_data);
-
- if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect)
- {
- descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;
- }
- else if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric)
- {
- descriptor.m_PaddingMode = armnn::PaddingMode::Symmetric;
- }
- else
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: PaddingMode must be either REFLECT or SYMMETRIC in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
-            return kTfLiteError;
-        }
-
- // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
- // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
- auto inputShape = inputTensorInfo.GetShape();
- auto padList = descriptor.m_PadList;
-
- const unsigned int isReflect =
- static_cast<unsigned int>(descriptor.m_PaddingMode == armnn::PaddingMode::Reflect);
- for(unsigned int i = 0; i < padList.size(); ++i)
- {
- if(padList.at(i).first > (inputShape[i] - isReflect) ||
- padList.at(i).second > (inputShape[i] - isReflect))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Padding values must be less (Reflect) or "
- "equal (Symmetric) to the dimension size in operator #%d node #%d: ",
- tfLitePadOperatorCode, nodeIndex);
- }
- }
- }
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("PAD",
- tfLiteContext,
- IsPadSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
-
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
- padLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(padLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- return Connect(padLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
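The paddings input is read as a flat int32 buffer two values at a time, so a TfLite paddings tensor of shape [rank, 2] turns into rank (before, after) pairs in m_PadList. A small standalone sketch of that loop with made-up padding values:

    #include <cstdint>
    #include <utility>
    #include <vector>

    int main()
    {
        // Flattened [[0,0],[1,1],[2,2],[0,0]], as the delegate reads it from the paddings tensor.
        const int32_t paddingData[] = { 0, 0, 1, 1, 2, 2, 0, 0 };
        const size_t  step          = 2;
        std::vector<std::pair<int32_t, int32_t>> padList;
        for (size_t i = 0; i < sizeof(paddingData) / sizeof(paddingData[0]) / step; ++i)
        {
            padList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
        }
        return padList.size() == 4 ? 0 : 1;  // four (before, after) pairs expected
    }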
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
deleted file mode 100644
index 1178b6d8dc..0000000000
--- a/delegate/src/Pooling.hpp
+++ /dev/null
@@ -1,327 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <flatbuffers/flexbuffers.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLitePoolingOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLitePoolingOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLitePoolingOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
- TfLiteFusedActivation activationType = kTfLiteActNone;
- if (tfLiteNodeParameters)
- {
- activationType = tfLiteNodeParameters->activation;
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- }
-
- armnn::PoolingAlgorithm poolingAlgorithm;
- switch(tfLitePoolingOperatorCode)
- {
- case kTfLiteBuiltinAveragePool2d:
- poolingAlgorithm = armnn::PoolingAlgorithm::Average;
- break;
- case kTfLiteBuiltinL2Pool2d:
- poolingAlgorithm = armnn::PoolingAlgorithm::L2;
- break;
- case kTfLiteBuiltinMaxPool2d:
- poolingAlgorithm = armnn::PoolingAlgorithm::Max;
- break;
- default:
- return kTfLiteError;
- }
-
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = poolingAlgorithm;
-
- descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
- descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
- descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
- descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- unsigned int inputHeight = inputTensorInfo.GetShape()[1];
- unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
- CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
- descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
- CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
- descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
- tfLiteContext,
- IsPooling2dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
- poolingLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(poolingLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
-}
-
-TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- std::string customOperatorName)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
-            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator %s node #%d: ",
- customOperatorName.c_str(), nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
-            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator %s node #%d: ",
- customOperatorName.c_str(), nodeIndex);
- return kTfLiteError;
- }
- // Set the input and output info
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Custom Operators are defined by the name string associated to the operator. Use this to determine
- // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite.
- armnn::PoolingAlgorithm poolingAlgorithm;
- if (customOperatorName == "MaxPool3D")
- {
- poolingAlgorithm = armnn::PoolingAlgorithm::Max;
- }
- else if (customOperatorName == "AveragePool3D")
- {
- poolingAlgorithm = armnn::PoolingAlgorithm::Average;
- }
- else
- {
- return kTfLiteError;
- }
- // Create the armnn pool3d descriptor and set the algorithm parsed above.
- armnn::Pooling3dDescriptor descriptor;
- descriptor.m_PoolType = poolingAlgorithm;
-
- // custom_initial_data and custom_initial_data_size are void* variables defined in the tflite registration
- // used to access the custom option buffer for the operator.
- auto custom_data = tfLiteNode->custom_initial_data;
- auto custom_data_size = tfLiteNode->custom_initial_data_size;
- // Reinterpret the void* to a byte buffer to access the options data in the flexbuffers map.
- const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(custom_data),
- custom_data_size).AsMap();
- // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
- const auto poolDims = m["ksize"].AsTypedVector();
- descriptor.m_PoolWidth = poolDims[3].AsInt32();
- descriptor.m_PoolHeight = poolDims[2].AsInt32();
- descriptor.m_PoolDepth = poolDims[1].AsInt32();
-
-    // strideDims is a vector of [ 1, Z, Y, X, 1 ]
- const auto strideDims = m["strides"].AsTypedVector();
- descriptor.m_StrideX = strideDims[3].AsInt32();
- descriptor.m_StrideY = strideDims[2].AsInt32();
- descriptor.m_StrideZ = strideDims[1].AsInt32();
- descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
-
- unsigned int inputDepth = inputTensorInfo.GetShape()[1];
- unsigned int inputHeight = inputTensorInfo.GetShape()[2];
- unsigned int inputWidth = inputTensorInfo.GetShape()[3];
-
- // CalcPadding expects a TfLitePadding type. Parse flexbuffers to extract padding string and create TfLitePadding.
- std::string paddingStr = m["padding"].AsString().str();
- TfLitePadding padding;
- if (paddingStr == "VALID")
- {
- padding = kTfLitePaddingValid;
- }
- else if (paddingStr == "SAME")
- {
- padding = kTfLitePaddingSame;
- }
- else
- {
- padding = kTfLitePaddingUnknown;
- }
- // Calculates padding for each pooling dimension separately
- CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
- descriptor.m_PadTop, descriptor.m_PadBottom, padding);
- CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
- descriptor.m_PadLeft, descriptor.m_PadRight, padding);
- CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
- descriptor.m_PadFront, descriptor.m_PadBack, padding);
-
-
- // Check activation by parsing the string from the flexbuffer map
- std::string activationTypeStr = m["activation"].AsString().str();
- TfLiteFusedActivation activationType = kTfLiteActNone;
-
- if (activationTypeStr == "kTfLiteActRelu")
- {
- activationType = kTfLiteActRelu;
- }
- else if (activationTypeStr == "kTfLiteActReluN1To1")
- {
- activationType = kTfLiteActReluN1To1;
- }
- else if (activationTypeStr == "kTfLiteActRelu6")
- {
- activationType = kTfLiteActRelu6;
- }
- else if (activationTypeStr == "kTfLiteActTanh")
- {
- activationType = kTfLiteActTanh;
- }
- else if (activationTypeStr == "kTfLiteActSignBit")
- {
- activationType = kTfLiteActSignBit;
- }
- else if (activationTypeStr == "kTfLiteActSigmoid")
- {
- activationType = kTfLiteActSigmoid;
- }
- else
- {
- activationType = kTfLiteActNone;
- }
-
- TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
- outputTensorInfo, activationType);
- if(activationStatus != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
-
- // Validate the output info.
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
- FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
- tfLiteContext,
- IsPooling3dSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Create the Layer
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
- poolingLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(poolingLayer != nullptr);
-
- // Create and set output slots
- armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
-}
-
-} // namespace armnnDelegate
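
The Pooling3D path above depends on the layout of the custom-option flexbuffer: "ksize" holds [ 1, Depth, Height, Width, 1 ], "strides" holds [ 1, Z, Y, X, 1 ], and "padding"/"activation" are strings. A sketch of that decoding in isolation, assuming the same field names; the struct and function names here are illustrative only, not part of the delegate.

    #include <cstddef>
    #include <cstdint>
    #include <string>

    #include <flatbuffers/flexbuffers.h>

    // Illustrative decoder for the Pooling3D custom options consumed above.
    struct Pool3dOptions
    {
        int32_t poolDepth, poolHeight, poolWidth;
        int32_t strideZ, strideY, strideX;
        std::string padding;    // "VALID" or "SAME"
        std::string activation; // e.g. "kTfLiteActRelu6"
    };

    inline Pool3dOptions ParsePool3dOptions(const void* customData, std::size_t customDataSize)
    {
        const auto& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(customData),
                                             customDataSize).AsMap();
        const auto ksize   = m["ksize"].AsTypedVector();   // [ 1, Depth, Height, Width, 1 ]
        const auto strides = m["strides"].AsTypedVector(); // [ 1, Z, Y, X, 1 ]
        return { ksize[1].AsInt32(), ksize[2].AsInt32(), ksize[3].AsInt32(),
                 strides[1].AsInt32(), strides[2].AsInt32(), strides[3].AsInt32(),
                 m["padding"].AsString().str(),
                 m["activation"].AsString().str() };
    }
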
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
deleted file mode 100644
index 06e74ed635..0000000000
--- a/delegate/src/Prelu.hpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& alphaInfo,
- const armnn::TensorInfo& outputInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("PRELU",
- tfLiteContext,
- IsPreluSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- alphaInfo,
- outputInfo);
- };
-
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteAlphaTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteAlphaTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& alphaTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAlphaTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- if (!delegateData.m_Network)
- {
- return ValidatePreluOperator(delegateData,
- tfLiteContext,
- inputTensorInfo,
- alphaTensorInfo,
- outputTensorInfo);
- }
-
- armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
- ARMNN_ASSERT(preluLayer != nullptr);
-
- bool isConstantAlpha = tflite::IsConstantTensor(&tfLiteAlphaTensor);
-
- // Add constant layer for constant alpha
- if (isConstantAlpha)
- {
- auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, tfLiteAlphaTensor.data.data);
-
- armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
- ARMNN_ASSERT(constLayer != nullptr);
-
- constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
- constLayer->GetOutputSlot(0).Connect(preluLayer->GetInputSlot(1));
- }
-
- armnn::IOutputSlot& outputSlot = preluLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // Connect
- return Connect(preluLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
deleted file mode 100644
index f1192960e4..0000000000
--- a/delegate/src/Quantization.hpp
+++ /dev/null
@@ -1,171 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteDequantizeOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLiteDequantizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLiteDequantizeOperatorCode, nodeIndex);
-
- return kTfLiteError;
- }
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
- tfLiteContext,
- IsDequantizeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
- dequantizeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(dequantizeLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
- delegateData,
- tfLiteContext,
- tfLiteNode);
- if (inputsTensorsProcess == kTfLiteError)
- {
- return inputsTensorsProcess;
- }
-
- return Connect(dequantizeLayer, tfLiteNode, delegateData);
-}
-
-TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteQuantizeOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- tfLiteQuantizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- tfLiteQuantizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // Only affine per-layer quantization is supported.
- if (!IsAffineQuantization(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
- tfLiteQuantizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
- tfLiteContext,
- IsQuantizeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
- quantizeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(quantizeLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- return Connect(quantizeLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
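
The QUANTIZE path above rejects anything that is not affine per-layer quantization, i.e. the output tensor must carry a single scale/zero-point pair rather than per-channel parameters. A sketch of that kind of check is shown below; it is not necessarily the exact body of IsAffineQuantization, and the helper name is illustrative. The TfLiteQuantization/TfLiteAffineQuantization fields follow the TfLite C API.

    #include <tensorflow/lite/c/common.h>

    // Illustrative per-layer affine check: affine quantization with exactly one
    // scale/zero-point pair, no per-channel quantization.
    inline bool IsAffinePerLayerQuantization(const TfLiteTensor& tensor)
    {
        if (tensor.quantization.type != kTfLiteAffineQuantization)
        {
            return false;
        }
        const auto* params = static_cast<const TfLiteAffineQuantization*>(tensor.quantization.params);
        return params != nullptr && params->scale != nullptr && params->scale->size == 1;
    }
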
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
deleted file mode 100644
index 864fb7af67..0000000000
--- a/delegate/src/Redefine.hpp
+++ /dev/null
@@ -1,289 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <numeric>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitCastOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("CAST",
- tfLiteContext,
- IsCastSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo);
- };
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitCastOperator will be called again to add the layer to the network as seen further below
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a Cast layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-
-TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
- const std::vector<int32_t>& targetShape,
- armnn::ReshapeDescriptor& reshapeDesc)
-{
- std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
- const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
-
- if (stretchDim != targetShape.end())
- {
- if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
- {
-            // More than one stretch dimension (-1) is invalid; return kTfLiteError and let the caller log it
- return kTfLiteError;
- }
-
- auto targetNumElements =
- armnn::numeric_cast<unsigned int>(
- std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
-
- auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
- outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
- }
-
- armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
- outputDims.data());
- reshapeDesc.m_TargetShape = outputShape;
- return kTfLiteOk;
-}
-
-TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
-
- if (numInputs == 2)
- {
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- }
- else
- {
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- }
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor0, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::ReshapeDescriptor reshapeDesc;
- std::vector<int32_t> targetShape;
-
- TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
-
-    // The new shape can be defined by either a second input tensor or by a builtin option, so we need to
-    // check for both.
-    // Options might be set without valid data, so we need to check that the number of dimensions is in a
-    // valid range.
- if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
- {
- for (int i=0; i < reshapeOptions->num_dimensions; ++i)
- {
- targetShape.push_back(reshapeOptions->shape[i]);
- }
- }
- else if (numInputs == 2)
- {
- // Get shape from the second input tensor
- const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- if (tfLiteShapeInputTensor.dims->size != 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
- "operator #%d node #%d: Falling back to TfLiteOptions.",
- operatorCode, nodeIndex);
- }
- else
- {
- // Get the shape data out of the input tensor
- auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
- auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
- for (auto i=0; i < shapeTensorNumValues; ++i)
- {
- targetShape.push_back(*(shapeTensorDataPtr+i));
- }
- }
- }
- else
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "Target shape not defined in reshape parameters or input tensor. "
- "At least one method required in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // Use the data to create the required tensor shape.
- if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
- "operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Reshape, number of elements in output shape does not match input "
- "operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo0,
- outInfo,
- reshapeDesc);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
-
- return kTfLiteError;
-}
-
-TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
-
- return kTfLiteError;
-}
-
-} // namespace armnnDelegate
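
CreateOutputTensorShape above resolves a single -1 ("stretch") entry in the requested shape by dividing the input element count by the product of the known dimensions; starting std::accumulate at -1 cancels the -1 entry out of that product. A self-contained sketch of the same arithmetic, with an illustrative function name:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Illustrative stretch-dimension resolution, mirroring the logic above.
    inline std::vector<uint32_t> ResolveTargetShape(uint32_t inputNumElements,
                                                    const std::vector<int32_t>& targetShape)
    {
        std::vector<uint32_t> outputDims(targetShape.begin(), targetShape.end());
        const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
        if (stretchDim != targetShape.end())
        {
            // Initial value -1 cancels the -1 entry, leaving the product of the known dims.
            const auto knownProduct = static_cast<uint32_t>(
                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
            const auto stretchIndex = static_cast<std::size_t>(std::distance(targetShape.begin(), stretchDim));
            outputDims[stretchIndex] = inputNumElements / knownProduct;
        }
        return outputDims;
    }

For example, an input with 24 elements and a target shape of {2, -1, 4} resolves to {2, 3, 4}.
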
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
deleted file mode 100644
index 2d8b462cd2..0000000000
--- a/delegate/src/Reduce.hpp
+++ /dev/null
@@ -1,146 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t reduceOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, reduceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, reduceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // Get const axis value from model and set it to descriptor.
- const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteAxisTensor, reduceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
- auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
-
- std::vector<int32_t> axis;
-    // Add axis data to vector to be converted to unsigned int and assigned to descriptor axis.
- if (axisTensorData != nullptr)
- {
- for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
- {
- axis.emplace_back(axisTensorData[i]);
- }
- }
- else
- {
- for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
- {
- axis.push_back(i);
- }
- }
-
- // Convert the axis to unsigned int and remove duplicates.
- unsigned int rank = inputTensorInfo.GetNumDimensions();
- std::set<unsigned int> uniqueAxis;
- std::transform(axis.begin(),
- axis.end(),
- std::inserter(uniqueAxis, uniqueAxis.begin()),
- [rank](int i)->unsigned int{ return (i + rank) % rank; });
-
- armnn::ReduceDescriptor desc;
- desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
-
- auto* reducerParameters = reinterpret_cast<TfLiteReducerParams*>(tfLiteNode->builtin_data);
- desc.m_KeepDims = reducerParameters->keep_dims;
- if (reduceOperatorCode == kTfLiteBuiltinReduceMax)
- {
- desc.m_ReduceOperation = armnn::ReduceOperation::Max;
- }
- else if (reduceOperatorCode == kTfLiteBuiltinReduceMin)
- {
- desc.m_ReduceOperation = armnn::ReduceOperation::Min;
- }
- else if (reduceOperatorCode == kTfLiteBuiltinSum)
- {
- desc.m_ReduceOperation = armnn::ReduceOperation::Sum;
- }
- else if (reduceOperatorCode == kTfLiteBuiltinReduceProd)
- {
- desc.m_ReduceOperation = armnn::ReduceOperation::Prod;
- }
- else
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Unsupported Reduction Operator #%d node #%d: ",
- reduceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
- tfLiteContext,
- IsReduceSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- desc);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
-    // Add a Reduce layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
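
The axis handling above wraps negative TfLite axes into [0, rank) and drops duplicates, because the armnn ReduceDescriptor expects unique, non-negative axes. A standalone sketch of that normalisation (the function name is illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <set>
    #include <vector>

    // Illustrative axis normalisation, mirroring the std::transform above.
    inline std::vector<uint32_t> NormaliseAxes(const std::vector<int32_t>& axes, uint32_t rank)
    {
        std::set<uint32_t> uniqueAxes;
        std::transform(axes.begin(), axes.end(),
                       std::inserter(uniqueAxes, uniqueAxes.begin()),
                       [rank](int32_t axis) -> uint32_t { return (axis + rank) % rank; });
        return { uniqueAxes.begin(), uniqueAxes.end() };
    }

For example, axes {-1, 1} with rank 4 normalise to {1, 3}.
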
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
deleted file mode 100644
index 370f1ab2d2..0000000000
--- a/delegate/src/Resize.hpp
+++ /dev/null
@@ -1,205 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <armnn/Descriptors.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-
-namespace armnnDelegate
-{
-
-
-
-TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputInfo,
- const armnn::ResizeDescriptor& descriptor)
-{
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("RESIZE",
- tfLiteContext,
- IsResizeSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- outputInfo,
- descriptor);
-
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t resizeOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- // The first input contains the data of the image that should be resized [batch, height, width, channels]
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // The second input contains a size tensor. The size tensor contains two integer values
- // that describe the new height and width of the image [new_height, new_width]
- const TfLiteTensor& tfLiteSizeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (IsDynamicTensor(tfLiteSizeTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // The output tensor should have the shape [batch, new_height, new_width, channels]
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- std::string layerName("Resize");
-
- // Fill descriptor
- armnn::ResizeDescriptor desc;
- switch (resizeOperatorCode)
- {
- case kTfLiteBuiltinResizeBilinear:
- {
- desc.m_Method = armnn::ResizeMethod::Bilinear;
-
- layerName += "Bilinear:" + std::to_string(nodeIndex);
-
-            TfLiteResizeBilinearParams* bilinearOptions =
-                reinterpret_cast<TfLiteResizeBilinearParams*>(tfLiteNode->builtin_data);
-
-            desc.m_AlignCorners = bilinearOptions->align_corners;
-            desc.m_HalfPixelCenters = bilinearOptions->half_pixel_centers;
- break;
- }
- case kTfLiteBuiltinResizeNearestNeighbor:
- {
- desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
- layerName += "NearestNeighbor:" + std::to_string(nodeIndex);
-
- TfLiteResizeNearestNeighborParams* nearestNeighborOptions =
- reinterpret_cast<TfLiteResizeNearestNeighborParams*>(tfLiteNode->builtin_data);
-
- desc.m_AlignCorners = nearestNeighborOptions->align_corners;
- desc.m_HalfPixelCenters = nearestNeighborOptions->half_pixel_centers;
- break;
- }
- default:
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Unknown TfLite built in operation for Resize. Given operator: #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- }
-
-    // In armnn the values of the size input tensor [new_height, new_width] are saved in the operator
-    // descriptor. We have to read them from the input tensor and write them to the descriptor.
-
- auto* sizeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteSizeTensor);
- auto sizeTensorNumDimensions = tfLiteSizeTensor.dims->size;
-    // The size tensor must be a 1D tensor -> [new_height, new_width]
- if (sizeTensorNumDimensions != 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
-            "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation must be a "
-            "1D tensor. Operator: #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // Get number of values in the size tensor
- auto sizeTensorNumValues = tfLiteSizeTensor.dims->data[0];
- if (sizeTensorNumValues == 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation is not allowed to be a "
- "dynamic tensor. Operator: #%d node #%d: ",
- resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- else if (sizeTensorNumValues != 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
-            "TfLiteArmnnDelegate: The Size-Input-Tensor of the Resize operation must have "
-            "2 elements [new_height, new_width] but a tensor with #%d elements was given. "
- "Operator: #%d node #%d: ",
- sizeTensorNumValues, resizeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- // get size tensor data
- std::vector<int32_t> sizeTensorData(sizeTensorDataPtr, sizeTensorDataPtr+sizeTensorNumValues);
-
- desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
- desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
- desc.m_DataLayout = armnn::DataLayout::NHWC;
-
- // No network pointer indicates that only support for this operator should be checked
- if (!delegateData.m_Network)
- {
- return ValidateResizeOperator(delegateData,
- tfLiteContext,
- inputTensorInfo,
- outputTensorInfo,
- desc);
- }
-
-
- armnn::IConnectableLayer* resizeLayer = nullptr;
- resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
-
- armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- ARMNN_ASSERT(resizeLayer != nullptr);
-
- return Connect(resizeLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
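
The size input handled above is a 1D int32 tensor with exactly two values, [new_height, new_width], which are copied into the ResizeDescriptor (NHWC layout). A small sketch of that extraction with illustrative names:

    #include <cstdint>

    // Illustrative size-tensor extraction for Resize.
    struct ResizeTarget
    {
        uint32_t height;
        uint32_t width;
    };

    inline bool ReadResizeTarget(const int32_t* sizeData, int32_t numValues, ResizeTarget& out)
    {
        if (sizeData == nullptr || numValues != 2)
        {
            return false;   // reject empty (dynamic) or wrongly sized size tensors
        }
        out.height = static_cast<uint32_t>(sizeData[0]);
        out.width  = static_cast<uint32_t>(sizeData[1]);
        return true;
    }
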
diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp
deleted file mode 100644
index 7a060b1d8f..0000000000
--- a/delegate/src/Round.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "SharedFunctions.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    // NOTE: It looks like outputTensorInfo is the only thing required when we are adding the Floor
-    //       layer, so the other lookups could be moved inside the !delegateData.m_Network block for
-    //       efficiency.
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitFloorOperator will be called again to add the layer to the network as seen further below
- if (!delegateData.m_Network)
- {
- return ValidateFloorOperator(delegateData, tfLiteContext, inputTensorInfo, outputTensorInfo);
- }
-
- // Add a Floor layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
deleted file mode 100644
index d797563ab5..0000000000
--- a/delegate/src/Shape.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <numeric>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
- if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: output_type data type is not supported in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
- tfLiteContext,
- IsShapeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo);
- };
-
- // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
- // support for the operator
- // If supported, VisitShapeOperator will be called again to add the layer to the network as seen further below
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a Shape layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
deleted file mode 100644
index fef970173e..0000000000
--- a/delegate/src/SharedFunctions.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-
-#include "SharedFunctions.hpp"
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputTensorInfo,
- const armnn::TensorInfo& outputTensorInfo)
-{
- bool isSupported = false;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("FLOOR",
- tfLiteContext,
- IsFloorSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputTensorInfo,
- outInfo);
- };
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputInfo,
- TfLiteFusedActivation activationType)
-{
- armnn::ActivationDescriptor activationDesc;
-
- switch (activationType)
- {
- case kTfLiteActNone:
- {
- // No Activation
- return kTfLiteOk;
- }
- case kTfLiteActRelu:
- {
- activationDesc.m_Function = armnn::ActivationFunction::ReLu;
- break;
- }
-// The name of kTfLiteActRelu1 changed after TF Lite v2.3
-#if defined(ARMNN_POST_TFLITE_2_3)
- case kTfLiteActReluN1To1:
-#else
- case kTfLiteActRelu1:
-#endif
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = -1.0f;
- break;
- }
- case kTfLiteActRelu6:
- {
- activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
- activationDesc.m_A = 6.0f;
- activationDesc.m_B = 0.0f;
- break;
- }
- case kTfLiteActSigmoid:
- {
- activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
- break;
- }
- case kTfLiteActTanh:
- {
- activationDesc.m_Function = armnn::ActivationFunction::TanH;
- activationDesc.m_A = 1.0f;
- activationDesc.m_B = 1.0f;
- break;
- }
- default:
- return kTfLiteError;
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
-
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
- tfLiteContext,
- IsActivationSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- outputInfo,
- activationDesc);
- };
- validateFunc(outputInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-
-} // namespace armnnDelegate
-
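
The activation mapping above packs the clamp range of bounded activations into the armnn ActivationDescriptor, with m_A as the upper bound and m_B as the lower bound. A compact, illustrative summary of the pairs used by the switch (the helper name is not part of the delegate):

    #include <utility>

    // Illustrative clamp ranges, mirroring the switch above:
    //   ReluN1To1 / Relu1 -> BoundedReLu clamping to [-1, 1]
    //   Relu6             -> BoundedReLu clamping to [ 0, 6]
    // Returned as {m_A (upper bound), m_B (lower bound)}.
    inline std::pair<float, float> BoundedReluRange(bool isReluN1To1)
    {
        return isReluN1To1 ? std::make_pair(1.0f, -1.0f)
                           : std::make_pair(6.0f,  0.0f);
    }
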
diff --git a/delegate/src/SharedFunctions.hpp b/delegate/src/SharedFunctions.hpp
deleted file mode 100644
index b03a63ded9..0000000000
--- a/delegate/src/SharedFunctions.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputTensorInfo,
- const armnn::TensorInfo& outputTensorInfo);
-
-TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputInfo,
- TfLiteFusedActivation activationType);
-
-} // namespace armnnDelegate
-
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
deleted file mode 100644
index f19e3327e4..0000000000
--- a/delegate/src/Slice.hpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t sliceOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- // Read inputs [input, begin, size]
- int numInputs = tfLiteNode->inputs->size;
- std::vector<const TfLiteTensor*> tfLiteInputs;
- tfLiteInputs.reserve(numInputs);
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- for (int i = 0; i < numInputs; i++)
- {
- const TfLiteTensor* inputTensor = &tfLiteTensors[tfLiteNode->inputs->data[i]];
- tfLiteInputs.push_back(inputTensor);
- if (!IsValid(tfLiteContext, *inputTensor, sliceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- }
-
- // We save the begin and size tensors in our descriptor. Therefore we have to read those values from inputs
- int inputRank = tfLiteInputs[0]->dims->size;
- auto ReadInt32Input = [&](int inputIndex, std::vector<uint32_t>& outputData) -> TfLiteStatus
- {
- if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to "
- "be of type int32. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- int rank = tfLiteInputs[inputIndex]->dims->size;
- if (rank != 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to "
- "be a 1D-Tensor. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- int numValues = tfLiteInputs[inputIndex]->dims->data[0];
- if (numValues != inputRank)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The number of values in the Begin- and Size-Tensors of the "
- "Slice operation need to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- // return tensor data
- auto* tensorDataPtr = tflite::GetTensorData<uint32_t>(tfLiteInputs[inputIndex]);
- outputData.assign(tensorDataPtr, tensorDataPtr+numValues);
- return kTfLiteOk;
- };
-
- std::vector<uint32_t> begin;
- if (ReadInt32Input(1, begin) != kTfLiteOk)
- return kTfLiteError;
- std::vector<uint32_t> size;
- if (ReadInt32Input(2, size) != kTfLiteOk)
- return kTfLiteError;
-
- // Write all data to the descriptor
- armnn::SliceDescriptor descriptor(begin, size);
-
- // Validate output
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("SLICE",
- tfLiteContext,
- IsSliceSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a Slice layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
-
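
The begin/size validation above boils down to three checks: the auxiliary inputs must be int32, rank 1, and hold exactly as many values as the data input has dimensions; the values are then copied straight into the SliceDescriptor. A standalone sketch of those checks (the helper name is illustrative):

    #include <cstdint>
    #include <vector>

    // Illustrative begin/size reader for Slice; returns false if the tensor does
    // not match the expectations described above.
    inline bool ReadSliceParam(const int32_t* data,
                               int32_t tensorRank,
                               int32_t numValues,
                               int32_t inputRank,
                               std::vector<uint32_t>& out)
    {
        if (data == nullptr || tensorRank != 1 || numValues != inputRank)
        {
            return false;
        }
        out.assign(data, data + numValues);
        return true;
    }
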
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
deleted file mode 100644
index 31c6ac3677..0000000000
--- a/delegate/src/Softmax.hpp
+++ /dev/null
@@ -1,155 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputTensorInfo,
- const armnn::SoftmaxDescriptor& descriptor)
-{
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("SOFTMAX",
- tfLiteContext,
- IsSoftmaxSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- outputTensorInfo,
- descriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-
-TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const armnn::TensorInfo& inputInfo,
- const armnn::TensorInfo& outputTensorInfo,
- const armnn::LogSoftmaxDescriptor& descriptor)
-{
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("LOG_SOFTMAX",
- tfLiteContext,
- IsLogSoftmaxSupported,
- delegateData.m_Backends,
- isSupported,
- armnn::BackendId(),
- inputInfo,
- outputTensorInfo,
- descriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t softmaxOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
- nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
-
- if (!delegateData.m_Network)
- {
- switch(softmaxOperatorCode)
- {
- case kTfLiteBuiltinSoftmax:
- {
- armnn::SoftmaxDescriptor descriptor;
- auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
- descriptor.m_Beta = params->beta;
- return ValidateSoftmaxOperator(delegateData,
- tfLiteContext,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- }
- case kTfLiteBuiltinLogSoftmax:
- {
- armnn::LogSoftmaxDescriptor descriptor;
- return ValidateLogSoftmaxOperator(delegateData,
- tfLiteContext,
- inputTensorInfo,
- outputTensorInfo,
- descriptor);
- }
- default:
- return kTfLiteError;
- }
- }
-
- armnn::IConnectableLayer* softmaxLayer = nullptr;
-
- switch(softmaxOperatorCode)
- {
- case kTfLiteBuiltinSoftmax:
- {
- armnn::SoftmaxDescriptor descriptor;
- auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
- descriptor.m_Beta = params->beta;
- softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
- break;
- }
- case kTfLiteBuiltinLogSoftmax:
- {
- armnn::LogSoftmaxDescriptor descriptor;
- softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
- break;
- }
- default:
- return kTfLiteError;
- }
- ARMNN_ASSERT(softmaxLayer != nullptr);
-
- armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(softmaxLayer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
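For reference, the beta read from TfLiteSoftmaxParams above scales the logits before normalisation (TfLite defaults it to 1.0), while LOG_SOFTMAX carries no builtin parameters, which is why LogSoftmaxDescriptor is default-constructed:

    softmax(x)_i     = exp(beta * x_i) / sum_j exp(beta * x_j)
    log_softmax(x)_i = x_i - log(sum_j exp(x_j))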
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
deleted file mode 100644
index cc7f03413d..0000000000
--- a/delegate/src/SpaceDepth.hpp
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::SpaceToDepthDescriptor descriptor;
- auto* params = reinterpret_cast<TfLiteSpaceToDepthParams*>(tfLiteNode->builtin_data);
- descriptor.m_BlockSize = params->block_size;
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
- tfLiteContext,
- IsSpaceToDepthSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a SpaceToDepth layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- armnn::DepthToSpaceDescriptor descriptor;
- auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(tfLiteNode->builtin_data);
- descriptor.m_BlockSize = params->block_size;
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
- tfLiteContext,
- IsDepthToSpaceSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a DepthToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
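Both operators carry only a block size. For an NHWC input, SPACE_TO_DEPTH with block size b maps [N, H, W, C] to [N, H/b, W/b, C*b*b], and DEPTH_TO_SPACE is its inverse. With illustrative values:

    // block_size = 2
    // SPACE_TO_DEPTH: [1, 4, 4, 3]  -> [1, 2, 2, 12]
    // DEPTH_TO_SPACE: [1, 2, 2, 12] -> [1, 4, 4, 3]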
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
deleted file mode 100644
index b183b55c54..0000000000
--- a/delegate/src/Split.hpp
+++ /dev/null
@@ -1,347 +0,0 @@
-//
-// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <algorithm>
-#include <iterator>
-#include <vector>
-
-namespace armnnDelegate
-{
-
-constexpr unsigned int MaxNumOfTensorDimensions = 5U;
-
-TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteSplitOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
-
- auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(tfLiteNode->builtin_data);
- const unsigned int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);
-
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-
- ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1);
- auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
- std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
- int32_t axis = axisTensorData[0];
-
- auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
- if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
- {
- // Square bracket denotes inclusive n while parenthesis denotes exclusive n
- // E.g. Rank 4 tensor can have axis in range [-4, 4), i.e. -4..3 inclusive
- // -1 == 3, -2 == 2, -3 == 1, -4 == 0
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
- axis, nodeIndex);
- return kTfLiteError;
- }
- const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
-
- std::vector<armnn::TensorInfo> outputs;
- for (unsigned int i = 0; i < numSplits; ++i)
- {
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
- }
- const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
-
- auto inputDimSize = inputTensorInfo.GetNumDimensions();
- if (inputDimSize > MaxNumOfTensorDimensions)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
- "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
- return kTfLiteError;
- }
-
- std::vector<unsigned int> splitterDimSizes(inputDimSize);
-
- // Add current input shape to splitterDimSizes
- for (unsigned int i = 0; i < inputDimSize; ++i)
- {
- splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
- }
-
- if (splitterDimSizes[splitDim] % numSplits != 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
- numSplits, splitterDimSizes[splitDim], nodeIndex);
- return kTfLiteError;
- }
- splitterDimSizes[splitDim] /= numSplits;
-
- armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- for (unsigned int j = 0; j < numSplits; ++j)
- {
- // Set the size of the views.
- for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
- {
- splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
- }
- splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
- }
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- // Check if supported
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
- tfLiteContext,
- IsSplitterSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfos,
- splitDescriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
- {
- layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
- }
-
- // Connect the input slots
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(0));
-
- // Prepare output slots
- for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
- {
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
- delegateData.m_OutputSlotForNode[
- static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfLiteSplitVOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteSplitsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteSplitsTensor);
- ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1);
- ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1);
-
- auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
- std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
- int32_t axis = axisTensorData[0];
-
- auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
- if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
- axis, nodeIndex);
- return kTfLiteError;
- }
- const unsigned int splitDim = ComputeWrappedIndex(axisTensorData[0], inputTensorInfo.GetNumDimensions());
-
- auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(tfLiteNode->builtin_data);
- unsigned int numSplits = 0;
- if (splitVParameters)
- {
- numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
- }
- else
- {
- numSplits = splitsTensorInfo.GetNumElements();
- }
-
- if (numSplits == 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Invalid number of splits %d in node #%d",
- numSplits, nodeIndex);
- return kTfLiteError;
- }
-
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
- std::vector<armnn::TensorInfo> outputs;
- for (unsigned int i = 0; i < numSplits; ++i)
- {
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
- }
- const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
-
- auto inputDimSize = inputTensorInfo.GetNumDimensions();
- if (inputDimSize > MaxNumOfTensorDimensions)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
- "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
- return kTfLiteError;
- }
-
- std::vector<int32_t> splitsTensorData(numSplits);
- std::memcpy(splitsTensorData.data(), tfLiteSplitsTensor.data.data, splitsTensorInfo.GetNumBytes());
-
-
- unsigned int index = 0;
- unsigned int inferredIndex = 0;
- int numberOfInferred = 0;
- int splitSum = 0;
-
- for (auto splitData : splitsTensorData)
- {
- if (splitData < 0)
- {
- ++numberOfInferred;
- inferredIndex = index;
- }
- else
- {
- splitSum += splitData;
- }
- ++index;
- }
-
- // Check for inferred axis
- if (numberOfInferred == 0)
- {
- if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: SplitV split_sizes does not sum to the dimension of value along"
- " split_dim in node #%d", nodeIndex);
- return kTfLiteError;
- }
- }
- else if (numberOfInferred == 1)
- {
- splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
- }
- else
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: SplitV cannot infer split size for more than one split in node #%d",
- nodeIndex);
- return kTfLiteError;
- }
-
- armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
- unsigned int accumSplit = 0;
- for (unsigned int j = 0; j < numSplits; ++j)
- {
- unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);
-
- // Set the size of the views.
- for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
- {
- unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
- if (dimIdx == splitDim)
- {
- dimSize = splitSize;
- }
- splitDescriptor.SetViewSize(j, dimIdx, dimSize);
- }
-
- splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
- accumSplit += splitSize;
- }
-
- armnn::BackendId setBackend;
- if (!delegateData.m_Network)
- {
- // Check if supported
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
- tfLiteContext,
- IsSplitterSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfos,
- splitDescriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
- {
- layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
- }
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate \ No newline at end of file
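The view bookkeeping above is easiest to see with concrete numbers. Splitting a [4, 6] tensor into three pieces along axis 1 produces three [4, 2] views whose origins advance along the split dimension; an illustrative sketch using the same descriptor calls:

    const unsigned int numSplits = 3;
    armnn::SplitterDescriptor splitDescriptor(numSplits, 2);  // 3 views over a rank-2 input
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        splitDescriptor.SetViewSize(j, 0, 4);              // full extent on the non-split axis
        splitDescriptor.SetViewSize(j, 1, 2);              // 6 / 3 along the split axis
        splitDescriptor.SetViewOriginCoord(j, 1, 2 * j);   // origins 0, 2 and 4
    }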
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
deleted file mode 100644
index 998e3d3e14..0000000000
--- a/delegate/src/StridedSlice.hpp
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t sliceOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 4, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- // Read inputs [input, begin, end, strides]
- int numInputs = tfLiteNode->inputs->size;
- std::vector<const TfLiteTensor*> tfLiteInputs;
- tfLiteInputs.reserve(numInputs);
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- for (int i = 0; i < numInputs; i++)
- {
- const TfLiteTensor* inputTensor = &tfLiteTensors[tfLiteNode->inputs->data[i]];
- tfLiteInputs.push_back(inputTensor);
- if (!IsValid(tfLiteContext, *inputTensor, sliceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- }
-
- // We save the begin, end and strides tensors in our descriptor. Therefore we have to read those values from inputs
- int inputRank = tfLiteInputs[0]->dims->size;
- auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData) -> TfLiteStatus
- {
- if (tfLiteInputs[inputIndex]->type != kTfLiteInt32)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The Begin-, End- and Stride-Tensors of the StridedSlice operation need to "
- "be of type int32. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- int rank = tfLiteInputs[inputIndex]->dims->size;
- if (rank != 1)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The Begin-, End- and Stride-Tensors of the StridedSlice operation need to "
- "each be a 1D tensor. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- int numValues = tfLiteInputs[inputIndex]->dims->data[0];
- if (numValues != inputRank)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The number of values in the Begin-, End- and Stride-Tensors of the "
- "StridedSlice operation needs to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ",
- sliceOperatorCode, nodeIndex);
- return kTfLiteError;
- }
- // return tensor data
- auto* tensorDataPtr = tflite::GetTensorData<int32_t>(tfLiteInputs[inputIndex]);
- outputData.assign(tensorDataPtr, tensorDataPtr+numValues);
- return kTfLiteOk;
- };
-
- std::vector<int32_t> beginData;
- if (ReadInt32Input(1, beginData) != kTfLiteOk)
- return kTfLiteError;
- std::vector<int32_t> endData;
- if (ReadInt32Input(2, endData) != kTfLiteOk)
- return kTfLiteError;
- std::vector<int32_t> strideData;
- if (ReadInt32Input(3, strideData) != kTfLiteOk)
- return kTfLiteError;
-
- // parse built in options
- auto* stridedSliceParams = reinterpret_cast<TfLiteStridedSliceParams*>(tfLiteNode->builtin_data);
-
- // Write all data to the descriptor
- armnn::StridedSliceDescriptor descriptor;
- descriptor.m_Begin = std::move(beginData);
- descriptor.m_End = std::move(endData);
- descriptor.m_Stride = std::move(strideData);
- descriptor.m_BeginMask = stridedSliceParams->begin_mask;
- descriptor.m_EllipsisMask = stridedSliceParams->ellipsis_mask;
- descriptor.m_EndMask = stridedSliceParams->end_mask;
- descriptor.m_NewAxisMask = stridedSliceParams->new_axis_mask;
- descriptor.m_ShrinkAxisMask = stridedSliceParams->shrink_axis_mask;
- descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-
- // Validate output
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, sliceOperatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("STRIDED_SLICE",
- tfLiteContext,
- IsStridedSliceSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Add a StridedSlice layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- // Connect
- return Connect(layer, tfLiteNode, delegateData);
-}
-
-} // namespace armnnDelegate
-
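As a concrete illustration of the fields populated above: taking every second element of a length-5 axis corresponds to begin 0, end 5 and stride 2, which selects indices 0, 2 and 4 (a [5] input yields a [3] output). The mask fields default to 0; set bits in begin_mask/end_mask make the corresponding range entry span the whole axis, and shrink_axis_mask drops that axis from the output.

    armnn::StridedSliceDescriptor desc;
    desc.m_Begin  = { 0 };
    desc.m_End    = { 5 };
    desc.m_Stride = { 2 };   // picks indices 0, 2 and 4 from a length-5 axis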
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
deleted file mode 100644
index 41178d0b59..0000000000
--- a/delegate/src/Transpose.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t tfliteTransposeOperatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor *tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor0))
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
- "operator #%d node #%d: ",
- tfliteTransposeOperatorCode, nodeIndex);
-
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (IsDynamicTensor(tfLiteInputTensor1))
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
- "operator #%d node #%d: ",
- tfliteTransposeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
- {
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
- "operator #%d node #%d: ",
- tfliteTransposeOperatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
- unsigned int numEl = tfLiteInputTensor1.dims->data[0];
-
- ARMNN_ASSERT( numEl <= static_cast<int>(armnn::MaxNumOfTensorDimensions));
- ARMNN_ASSERT( tfLiteInputTensor1.dims->size == 1); // ensure only single dimension to the permutation tensor
-
- armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
- reinterpret_cast<const armnn::PermutationVector::ValueType *> (permTensorDataPtr),
- static_cast<armnn::PermutationVector::SizeType>(numEl)));
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
- tfLiteContext,
- IsTransposeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo0,
- outputTensorInfo,
- descriptor);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
- transposeLayer->SetBackendId(setBackend);
- ARMNN_ASSERT(transposeLayer != nullptr);
- ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object
-
- armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(outputTensorInfo);
-
- // try to connect the Constant Inputs if there are any
- if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
- {
- return kTfLiteError;
- }
-
- return Connect(transposeLayer, tfLiteNode, delegateData);
-}
-} // namespace armnnDelegate
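The permutation tensor is handed to Arm NN unchanged, so for example a TfLite perm of {0, 2, 1} swaps the last two axes and turns a [2, 3, 4] input into a [2, 4, 3] output. A minimal sketch of building the descriptor from literal values (illustrative only):

    const int32_t perm[] = { 0, 2, 1 };
    armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
        reinterpret_cast<const armnn::PermutationVector::ValueType*>(perm),
        static_cast<armnn::PermutationVector::SizeType>(3)));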
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
deleted file mode 100644
index 9408397587..0000000000
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ /dev/null
@@ -1,302 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "DelegateUtils.hpp"
-
-#include <armnn/LstmParams.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- auto numInputs = tfLiteNode->inputs->size;
- if (numInputs < 2)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met with %d in node #%d",
- 2, numInputs, nodeIndex);
- return kTfLiteError;
- }
-
- const auto nodeParams = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams *>(tfLiteNode->builtin_data);
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
-
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
- // Please refer to each operand at
- // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
- armnn::LstmInputParams params;
-
- if (IsOptionalOperandPresent(tfLiteNode, 1))
- {
- params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1);
- }
-
- params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 2);
- params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 3);
- params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4);
-
- // Recurrent weight tensors of size {n_cell, n_output}
- if (IsOptionalOperandPresent(tfLiteNode, 5))
- {
- params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5);
- }
-
- params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 6);
- params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 7);
- params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8);
-
- // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
- if (IsOptionalOperandPresent(tfLiteNode, 9))
- {
- params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 10))
- {
- params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 11))
- {
- params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11);
- }
-
- // Gates bias tensors of size {n_cell}
- if (IsOptionalOperandPresent(tfLiteNode, 12))
- {
- params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12);
- }
-
- params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 13);
- params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 14);
- params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15);
-
- // Projection weight tensor of size {n_output, n_cell}
- if (IsOptionalOperandPresent(tfLiteNode, 16))
- {
- params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16);
- }
- // Projection bias tensor of size {n_output}
- if (IsOptionalOperandPresent(tfLiteNode, 17))
- {
- params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17);
- }
-
- // These state tensors are defined as variable tensors, and will be modified by this op.
- armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[18]]);
- armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]);
-
- // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
- if (IsOptionalOperandPresent(tfLiteNode, 20))
- {
- params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 21))
- {
- params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 22))
- {
- params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22);
- }
-
- if (IsOptionalOperandPresent(tfLiteNode, 23))
- {
- params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23);
- }
-
- // set the layer descriptor
- armnn::UnidirectionalSequenceLstmDescriptor desc;
- desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
- desc.m_ClippingThresCell = nodeParams->cell_clip;
- desc.m_ClippingThresProj = nodeParams->proj_clip;
- desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
- || params.m_RecurrentToInputWeights == nullptr
- || params.m_InputGateBias == nullptr);
- desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
- desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
- desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
- || params.m_ForgetLayerNormWeights != nullptr
- || params.m_CellLayerNormWeights != nullptr
- || params.m_OutputLayerNormWeights != nullptr);
- desc.m_TimeMajor = nodeParams->time_major;
-
- if (tfLiteNode->intermediates->size > 3 && desc.m_LayerNormEnabled)
- {
- auto inputIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
- tfLiteTensors[tfLiteNode->intermediates->data[0]]);
- auto forgetIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
- tfLiteTensors[tfLiteNode->intermediates->data[1]]);
- auto cellIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
- tfLiteTensors[tfLiteNode->intermediates->data[2]]);
- auto outputIntermediateTensorInfo = GetTensorInfoForTfLiteTensor(
- tfLiteTensors[tfLiteNode->intermediates->data[3]]);
-
- desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
- desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
- desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
- desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
- }
- else
- {
- float defaultIntermediate = std::pow(2, -12);
- desc.m_InputIntermediateScale = defaultIntermediate;
- desc.m_ForgetIntermediateScale = defaultIntermediate;
- desc.m_CellIntermediateScale = defaultIntermediate;
- desc.m_OutputIntermediateScale = defaultIntermediate;
- }
- if (tfLiteNode->intermediates->size > 4)
- {
- auto hiddentensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->intermediates->data[4]]);
- desc.m_HiddenStateScale = hiddentensorInfo.GetQuantizationScale();
- desc.m_HiddenStateZeroPoint = hiddentensorInfo.GetQuantizationOffset();
- }
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-
- unsigned int batchSize = inputTensorInfo.GetShape()[0];
- unsigned int outputSize = outputTensorInfo.GetShape()[2];
- unsigned int numUnits = cellStateInInfo.GetShape()[1];
-
- armnn::DataType dataType = inputTensorInfo.GetDataType();
- float qScale = inputTensorInfo.GetQuantizationScale();
- float qOffset = inputTensorInfo.GetQuantizationOffset();
-
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
- if (!desc.m_CifgEnabled)
- {
- scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
- }
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
- cellStateInInfo.GetDataType(),
- cellStateInInfo.GetQuantizationScale(),
- cellStateInInfo.GetQuantizationOffset());
-
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
-
- armnn::LstmInputParamsInfo paramsInfo;
- paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
- paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
- paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
- paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
- paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
- paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
- paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
- paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
- paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
-
- if (!desc.m_CifgEnabled)
- {
- paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
- paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
- if (params.m_CellToInputWeights != nullptr)
- {
- paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
- }
- paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
- }
-
- if (desc.m_ProjectionEnabled)
- {
- paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
- if (params.m_ProjectionBias != nullptr)
- {
- paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
- }
- }
-
- if (desc.m_PeepholeEnabled)
- {
- paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
- paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
- }
-
- if (desc.m_LayerNormEnabled)
- {
- if(!desc.m_CifgEnabled)
- {
- paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
- }
- paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
- paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
- paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
- }
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
- tfLiteContext,
- IsUnidirectionalSequenceLstmSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputStateInInfo,
- cellStateInInfo,
- outputStateOutTensorInfo,
- cellStateOutTensorInfo,
- outputInfo,
- desc,
- paramsInfo);
- };
-
- if (!delegateData.m_Network)
- {
- validateFunc(outputTensorInfo, isSupported);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
- layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
-
- layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
- layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
- layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
-
- // Connect the inputs
- // input_layer
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(layer->GetInputSlot(0));
- // cellStateIn
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[18]]->Connect(layer->GetInputSlot(1));
- //outputStateIn
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[19]]->Connect(layer->GetInputSlot(2));
-
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(2);
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[0])] = &outputSlot;
- return kTfLiteOk;
-}
-
-} // namespace armnnDelegate \ No newline at end of file
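A few defaults above are worth spelling out. When the optional layer-norm intermediates are absent, the intermediate scales fall back to 2^-12, and CIFG mode is inferred from the missing input-gate operands (1, 5 and 12), which in turn decides the scratch buffer width:

    // intermediate scale = std::pow(2, -12) = 1.0f / 4096  (~2.44e-4)
    // CIFG enabled       => scratch buffer shape { batchSize, 3 * numUnits }
    // CIFG disabled      => scratch buffer shape { batchSize, 4 * numUnits }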
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
deleted file mode 100644
index ad541adccc..0000000000
--- a/delegate/src/Unpack.hpp
+++ /dev/null
@@ -1,214 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include "DelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <numeric>
-
-namespace armnnDelegate
-{
-
-TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode,
- int nodeIndex,
- int32_t operatorCode)
-{
- TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
-
- const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
- const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
-
- if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
-
- // Get Unpack Axis
- const auto params = reinterpret_cast<TfLiteUnpackParams*>(tfLiteNode->builtin_data);
-
- const unsigned int unpackAxis = NonNegative(params->axis, nodeIndex);
-
- const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-
- if (unpackAxis >= inputTensorInfo.GetNumDimensions())
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: The unpack axis #%d cannot be greater than or equal to "
- "the number of input dimensions #%d in operator #%d node #%d",
- unpackAxis, inputTensorInfo.GetNumDimensions(), operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // Get Unpack Num
- unsigned int unpackNum = NonNegative(params->num, nodeIndex);
-
- // If num is not defined, automatically infer from the length of the dimension axis.
- if(unpackNum == 0)
- {
- unpackNum = inputTensorInfo.GetShape()[unpackAxis];
- }
-
- // If unpack number cannot be inferred and is still zero, return kTfLiteError.
- if(unpackNum == 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Number to unpack must be greater than zero in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- // Check outputs
- TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, unpackNum, nodeIndex));
-
-
- auto inputDimSize = inputTensorInfo.GetNumDimensions();
- std::vector<unsigned int> unpackDimSizes(inputDimSize);
-
- // Add current input shape to unpackDimSizes
- for (unsigned int i = 0; i < inputDimSize; ++i)
- {
- unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
- }
-
- if (unpackDimSizes[unpackAxis] != unpackNum)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Number to unpack must be the same as length "
- "of the dimension to unpack along in operator #%d node #%d: ",
- operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- unpackDimSizes[unpackAxis] /= unpackNum;
-
- armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
- for (unsigned int j = 0; j < unpackNum; ++j)
- {
- // Set the size of the views.
- for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
- {
- splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
- }
- splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
- }
-
- std::vector<armnn::TensorInfo> outputs;
- for (unsigned int i = 0; i < unpackNum; ++i)
- {
- const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
- if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
- {
- return kTfLiteError;
- }
- outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
- }
- const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
-
- // Determine the shape of the Splitter layer outputs for validation
- armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
- unpackDimSizes.data());
-
- std::vector<armnn::TensorInfo> splitterOutputs;
- for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
- {
- splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
- outputTensorInfos[outputIndex].get().GetDataType(),
- outputTensorInfos[outputIndex].get().GetQuantizationScale(),
- outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
- }
- std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
- splitterOutputs.end());
-
- armnn::BackendId setBackendSplit;
- if (!delegateData.m_Network)
- {
- // Check if splitter is supported
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("UNPACK",
- tfLiteContext,
- IsSplitterSupported,
- delegateData.m_Backends,
- isSupported,
- setBackendSplit,
- inputTensorInfo,
- splitterOutputTensorInfos,
- splitDesc);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer
- // Use this descriptor later when creating every ReshapeLayer as all Reshape Layers should be the same
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
-
- armnn::BackendId setBackendReshape;
- if (!delegateData.m_Network)
- {
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
- tfLiteContext,
- IsReshapeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackendReshape,
- splitterOutputTensorInfos[0],
- outputTensorInfos[0],
- reshapeDescriptor);
- return isSupported ? kTfLiteOk : kTfLiteError;
- }
-
- std::string splitterLayerName("Unpack Splitter");
-
- armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
- splitterLayerName.c_str());
- splitterLayer->SetBackendId(setBackendSplit);
- ARMNN_ASSERT(splitterLayer != nullptr);
-
- for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
- {
- splitterLayer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
- }
-
- // Connect the input slots
- delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(splitterLayer->GetInputSlot(0));
-
- // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
- for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
- {
- std::string reshapeLayerName("Unpack Reshape");
- armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
- reshapeLayerName.c_str());
- reshapeLayer->SetBackendId(setBackendReshape);
- ARMNN_ASSERT(reshapeLayer != nullptr);
-
- splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
- splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
-
- armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
-
- delegateData.m_OutputSlotForNode[
- static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &slot;
-
- }
-
- return kTfLiteOk;
-}
-
-} // namespace armnnDelegate
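The split-then-reshape construction above is easiest to follow with a concrete shape. Unpacking a [4, 3, 2] tensor along axis 1 produces three outputs of shape [4, 2]: the splitter first yields three [4, 1, 2] views, and the per-output reshape removes the unpacked dimension (illustrative values only):

    // input:          [4, 3, 2], axis = 1, num = 3
    // splitter views:  3 x [4, 1, 2]   (unpackDimSizes[axis] /= unpackNum)
    // reshape target:  [4, 2]          (taken from the TfLite output tensor infos)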
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
deleted file mode 100644
index 4ddfc1a35f..0000000000
--- a/delegate/src/armnn_delegate.cpp
+++ /dev/null
@@ -1,1059 +0,0 @@
-//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <armnn_delegate.hpp>
-
-#include "Version.hpp"
-
-#include "Activation.hpp"
-#include "ArgMinMax.hpp"
-#include "BatchMatMul.hpp"
-#include "BatchSpace.hpp"
-#include "Comparison.hpp"
-#include "Convolution.hpp"
-#include "Control.hpp"
-#include "ElementwiseBinary.hpp"
-#include "ElementwiseUnary.hpp"
-#include "Fill.hpp"
-#include "FullyConnected.hpp"
-#include "Gather.hpp"
-#include "GatherNd.hpp"
-#include "LogicalBinary.hpp"
-#include "Lstm.hpp"
-#include "Normalization.hpp"
-#include "Pack.hpp"
-#include "Pad.hpp"
-#include "Pooling.hpp"
-#include "Prelu.hpp"
-#include "Quantization.hpp"
-#include "Redefine.hpp"
-#include "Reduce.hpp"
-#include "Resize.hpp"
-#include "Round.hpp"
-#include "Shape.hpp"
-#include "Slice.hpp"
-#include "StridedSlice.hpp"
-#include "Softmax.hpp"
-#include "SpaceDepth.hpp"
-#include "Split.hpp"
-#include "Transpose.hpp"
-#include "UnidirectionalSequenceLstm.hpp"
-#include "Unpack.hpp"
-
-#include <armnnUtils/Filesystem.hpp>
-#include <armnn/utility/Timer.hpp>
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/context_util.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <algorithm>
-#include <iostream>
-#include <sstream>
-
-namespace armnnDelegate
-{
-
-DelegateOptions TfLiteArmnnDelegateOptionsDefault()
-{
- DelegateOptions options(armnn::Compute::CpuRef);
- return options;
-}
-
-TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
-{
- auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
- return armnnDelegate->GetDelegate();
-}
-
-void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
-{
- if (tfLiteDelegate != nullptr)
- {
- delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
- }
-}
-
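// Typical usage of the create/delete pair above, as an illustrative sketch only
// (assumes an already built tflite::Interpreter named "interpreter"):
//
//     armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
//     TfLiteDelegate* armnnTfLiteDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(options);
//     if (interpreter->ModifyGraphWithDelegate(armnnTfLiteDelegate) != kTfLiteOk)
//     {
//         // Delegation failed; the model keeps running entirely on the TfLite runtime.
//     }
//     // ... run inference, destroy the interpreter ...
//     armnnDelegate::TfLiteArmnnDelegateDelete(armnnTfLiteDelegate);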
-TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
-{
- TfLiteIntArray* supportedOperators =
- static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);
-
- // ArmNN Delegate Registration
- static const TfLiteRegistration kArmnnSubgraphRegistration = {
- // ArmnnSubgraph Init
- .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
- armnn::IgnoreUnused(length);
- const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
-
- return static_cast<void*>(ArmnnSubgraph::Create(
- tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
- },
- // ArmnnSubgraph Free
- .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
- armnn::IgnoreUnused(tfLiteContext);
- if (buffer != nullptr)
- {
- delete static_cast<ArmnnSubgraph*>(buffer);
- }
- },
- // ArmnnSubgraph Prepare
- .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
- if (tfLiteNode->user_data == nullptr)
- {
- return kTfLiteError;
- }
- return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
- },
- // ArmnnSubgraph Invoke
- .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
- if (tfLiteNode->user_data == nullptr)
- {
- return kTfLiteError;
- }
-
- return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
- },
-
- .profiling_string = nullptr,
- .builtin_code = kTfLiteBuiltinDelegate,
- .custom_name = "TfLiteArmNnDelegate",
- .version = 1,
- .registration_external = nullptr,
- };
-
- const TfLiteStatus status =
- tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
- tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);
-
- TfLiteIntArrayFree(supportedOperators);
- return status;
-
-}
-
-Delegate::Delegate(armnnDelegate::DelegateOptions options)
- : m_Options(std::move(options))
-{
- // Configures logging for ARMNN
- if (m_Options.IsLoggingEnabled())
- {
- armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
- }
- // Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all armnn_delegate
- // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
- m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
- std::vector<armnn::BackendId> backends;
- if (m_Runtime)
- {
- const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
- for (auto& backend : m_Options.GetBackends())
- {
- if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
- {
- TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
- "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
- }
- else
- {
- backends.push_back(backend);
- }
- }
- }
-
- if (backends.empty())
- {
- // No known backend specified
- throw armnn::InvalidArgumentException("TfLiteArmnnDelegate: No known backend specified.");
- }
- m_Options.SetBackends(backends);
-
- TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
-}
-
-TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
-{
- TfLiteIntArray* executionPlan = nullptr;
- if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
- {
- TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
- return nullptr;
- }
-
- // Delegate data with null network
- DelegateData delegateData(m_Options.GetBackends());
-
- TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
- nodesToDelegate->size = 0;
-
- std::set<int32_t> unsupportedOperators;
-
- for (int i = 0; i < executionPlan->size; ++i)
- {
- const int nodeIndex = executionPlan->data[i];
-
- // If TfLite nodes can be delegated to ArmNN
- TfLiteNode* tfLiteNode = nullptr;
- TfLiteRegistration* tfLiteRegistration = nullptr;
- if (tfLiteContext->GetNodeAndRegistration(
- tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
- {
- TF_LITE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
- nodeIndex);
- continue;
- }
-
- TfLiteStatus visitStatus;
-
- try
- {
- visitStatus = ArmnnSubgraph::VisitNode(
- delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
- }
- catch(std::exception& ex)
- {
- ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
- visitStatus = kTfLiteError;
- }
-
- if ( visitStatus != kTfLiteOk)
- {
- // node is not supported by ArmNN
- unsupportedOperators.insert(tfLiteRegistration->builtin_code);
- continue;
- }
-
- nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
- }
-
- for (std::set<int32_t>::iterator it=unsupportedOperators.begin(); it!=unsupportedOperators.end(); ++it)
- {
- TF_LITE_KERNEL_LOG(tfLiteContext,
- "Operator %s [%d] is not supported by armnn_delegate.",
- tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
- *it);
- }
-
- if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
- {
- std::stringstream exMessage;
- exMessage << "TfLiteArmnnDelegate: There are unsupported operators in the model. ";
- exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
- exMessage << "This should only be disabled under test conditions.";
- throw armnn::Exception(exMessage.str());
- }
- if (nodesToDelegate->size == 0)
- {
- ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate." <<
- " The model will be executed entirely by TfLite runtime.";
- }
-
- std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
- return nodesToDelegate;
-}
-
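// Note on the flow above: DelegateData is constructed without an INetwork, so
// every VisitNode call runs in validation-only mode - the per-operator visit
// functions return straight after their FORWARD_LAYER_SUPPORT_FUNC check
// instead of adding layers. The same visit functions are reused with a real
// network in ArmnnSubgraph::Create further down.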
-TfLiteDelegate* Delegate::GetDelegate()
-{
- return &m_Delegate;
-}
-
-const std::string Delegate::GetVersion()
-{
- return DELEGATE_VERSION;
-}
-
-TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const TfLiteIntArray* inputs,
- std::vector<armnn::BindingPointInfo>& inputBindings)
-{
- const size_t numInputs = static_cast<size_t>(inputs->size);
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- const int32_t tensorId = inputs->data[i];
- const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
- // Do not create bindings for constant inputs
- if (tensor.allocation_type == kTfLiteMmapRo)
- {
- continue;
- }
-
- auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
-
- auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
- outputSlot.SetTensorInfo(tensorInfo);
-
- // Store for creating connections
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;
-
- inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- const TfLiteIntArray* outputs,
- std::vector<armnn::BindingPointInfo>& outputBindings)
-{
- const size_t numOutputs = static_cast<size_t>(outputs->size);
- for (unsigned int i = 0; i < numOutputs; ++i)
- {
- const int32_t tensorId = outputs->data[i];
- const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
-
- auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
-
- auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
- ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] != nullptr);
- delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
- outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
- }
-
- return kTfLiteOk;
-}
-
-ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
- const TfLiteDelegateParams* parameters,
- const Delegate* delegate)
-{
- const auto startTime = armnn::GetTimeNow();
- ARMNN_LOG(info) << "ArmnnSubgraph creation";
-
- TfLiteIntArray* executionPlan;
- if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
- {
- return nullptr;
- }
-
-    // Initialize DelegateData, which holds the network and the output slot information
- DelegateData delegateData(delegate->m_Options.GetBackends());
-
- // Build ArmNN Network
- armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
- armnn::NetworkId networkId;
- delegateData.m_Network = armnn::INetwork::Create(networkOptions);
-
- delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(tfLiteContext->tensors_size, nullptr);
-
- std::vector<armnn::BindingPointInfo> inputBindings;
- std::vector<armnn::BindingPointInfo> outputBindings;
-
- // Add input layer
- auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
- if (status != kTfLiteOk)
- {
- throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
- }
-
- // Parse TfLite delegate nodes to ArmNN
- const auto parseStartTime = armnn::GetTimeNow();
- for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
- {
- const int nodeIndex = parameters->nodes_to_replace->data[i];
-
- TfLiteNode* tfLiteNode = nullptr;
- TfLiteRegistration* tfLiteRegistration = nullptr;
- if (tfLiteContext->GetNodeAndRegistration(
- tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
- {
-            throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node and registration for node "
-                                   + std::to_string(nodeIndex));
- }
-
- if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
- {
-            throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node " + std::to_string(nodeIndex));
- }
- }
- ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
- << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";
-
- // Add Output layer
- status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
- if (status != kTfLiteOk)
- {
- throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
- }
-
- // Optimize ArmNN network
- armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
- try
- {
- const auto optimizeStartTime = armnn::GetTimeNow();
- optNet = armnn::Optimize(*(delegateData.m_Network.get()),
- delegate->m_Options.GetBackends(),
- delegate->m_Runtime->GetDeviceSpec(),
- delegate->m_Options.GetOptimizerOptions());
- ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
- << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
- }
- catch (std::exception& ex)
- {
- std::stringstream exMessage;
- exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
- throw armnn::Exception(exMessage.str());
- }
- if (!optNet)
- {
- // Optimize failed
- throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
- }
-
- // If set, we will serialize the optimized model into a dot file.
- const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
- if (!serializeToDotFile.empty())
- {
- ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
- fs::path filename = serializeToDotFile;
- std::fstream file(filename.c_str(), std::ios_base::out);
- optNet->SerializeToDot(file);
- }
-
- try
- {
- const auto loadStartTime = armnn::GetTimeNow();
-
- // Load graph into runtime
- std::string errorMessage;
- armnn::Status loadingStatus;
- armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
- armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
- // There's a bit of an assumption here that the delegate will only support Malloc memory source.
- if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
- {
- inputSource = armnn::MemorySource::Malloc;
- }
- if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
- {
- outputSource = armnn::MemorySource::Malloc;
- }
- armnn::INetworkProperties networkProperties(false,
- inputSource,
- outputSource,
- delegate->m_Options.GetInternalProfilingState(),
- delegate->m_Options.GetInternalProfilingDetail());
- loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
- std::move(optNet),
- errorMessage,
- networkProperties);
- if (loadingStatus != armnn::Status::Success)
- {
- // Network load failed.
- throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded: " + errorMessage);
- }
-
- ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
- << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
- }
- catch (std::exception& ex)
- {
- std::stringstream exMessage;
- exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
- throw armnn::Exception(exMessage.str());
- }
-
- // Register debug callback function
- if (delegate->m_Options.GetDebugCallbackFunction().has_value())
- {
- delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
- }
-
- ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
- << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";
-
- // Create a new SubGraph with networkId and runtime
- return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
-}
-
-TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
-{
- armnn::IgnoreUnused(tfLiteContext);
- return kTfLiteOk;
-}
-
-TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
-{
- // Prepare inputs
- armnn::InputTensors inputTensors;
- size_t inputIndex = 0;
- for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
- {
- TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
- if (tensor->allocation_type != kTfLiteMmapRo)
- {
- const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
- armnn::TensorInfo inputTensorInfo = inputBinding.second;
- inputTensorInfo.SetConstant(true);
- const armnn::ConstTensor inputTensor(inputTensorInfo, tensor->data.data);
- inputTensors.emplace_back(inputIdx, inputTensor);
-
- ++inputIndex;
- }
- }
-
- // Prepare outputs
- armnn::OutputTensors outputTensors;
- size_t outputIndex = 0;
- for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
- {
- const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
- TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
- const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
- outputTensors.emplace_back(outputIdx, outputTensor);
-
- ++outputIndex;
- }
-
- // Run graph
- auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
- // The delegate holds its own Arm NN runtime so this is our last chance to print internal profiling data.
- std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
- if (profiler && profiler->IsProfilingEnabled())
- {
- profiler->Print(std::cout);
- }
- return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
-}
-
-TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
- TfLiteContext* tfLiteContext,
- TfLiteRegistration* tfLiteRegistration,
- TfLiteNode* tfLiteNode,
- int nodeIndex)
-{
- switch (tfLiteRegistration->builtin_code)
- {
- case kTfLiteBuiltinCustom:
- {
-#if defined(ARMNN_POST_TFLITE_2_5)
- // Custom operators are defined by the name rather than the builtin code.
- // Parse the custom_name param in the registration to point to the correct visitor function.
- std::string customOperatorName = tfLiteRegistration->custom_name;
- if ( customOperatorName == "AveragePool3D" )
- {
- return VisitPooling3dOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- customOperatorName);
- }
- else if (customOperatorName == "MaxPool3D")
- {
- return VisitPooling3dOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- customOperatorName);
- }
-#endif
- // Invalid or unsupported custom operator
- return kTfLiteError;
- }
- case kTfLiteBuiltinAbs:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Abs);
- case kTfLiteBuiltinAdd:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinAdd);
- case kTfLiteBuiltinArgMax:
- return VisitArgMinMaxOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinArgMax);
- case kTfLiteBuiltinArgMin:
- return VisitArgMinMaxOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinArgMin);
- case kTfLiteBuiltinAveragePool2d:
- return VisitPooling2dOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinAveragePool2d);
- case kTfLiteBuiltinBatchMatmul:
- return VisitBatchMatMulOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinBatchMatmul);
- case kTfLiteBuiltinBatchToSpaceNd:
- return VisitBatchToSpaceNdOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinBatchToSpaceNd);
- case kTfLiteBuiltinCast:
- return VisitCastOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinCast);
- case kTfLiteBuiltinConcatenation:
- return VisitControlOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinConcatenation);
- case kTfLiteBuiltinConv2d:
- return VisitConvolutionOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinConv2d);
-// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
-#if defined(ARMNN_POST_TFLITE_2_5)
- case kTfLiteBuiltinConv3d:
- return VisitConvolutionOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinConv3d);
-#endif
- case kTfLiteBuiltinDepthToSpace:
- return VisitDepthToSpaceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinDepthToSpace);
- case kTfLiteBuiltinDepthwiseConv2d:
- return VisitConvolutionOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinDepthwiseConv2d);
- case kTfLiteBuiltinDequantize:
- return VisitDequantizeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinDequantize);
- case kTfLiteBuiltinDiv:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinDiv);
- case kTfLiteBuiltinElu:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinElu);
- case kTfLiteBuiltinEqual:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinEqual);
- case kTfLiteBuiltinExp:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Exp);
- case kTfLiteBuiltinExpandDims:
- return VisitExpandDimsOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinExpandDims);
- case kTfLiteBuiltinFill:
- return VisitFillOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinFill);
- case kTfLiteBuiltinFloor:
- return VisitFloorOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinFloor);
- case kTfLiteBuiltinFloorDiv:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinFloorDiv);
- case kTfLiteBuiltinFullyConnected:
- return VisitFullyConnectedOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinFullyConnected);
- case kTfLiteBuiltinGather:
- return VisitGatherOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinGather);
- case kTfLiteBuiltinGatherNd:
- return VisitGatherNdOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinGatherNd);
- case kTfLiteBuiltinGreater:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinGreater);
- case kTfLiteBuiltinGreaterEqual:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinGreaterEqual);
- case kTfLiteBuiltinHardSwish:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinHardSwish);
- case kTfLiteBuiltinL2Normalization:
- return VisitL2NormalizationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinL2Normalization);
- case kTfLiteBuiltinL2Pool2d:
- return VisitPooling2dOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinL2Pool2d);
- case kTfLiteBuiltinLess:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLess);
- case kTfLiteBuiltinLessEqual:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLessEqual);
- case kTfLiteBuiltinLocalResponseNormalization:
- return VisitLocalResponseNormalizationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLocalResponseNormalization);
- case kTfLiteBuiltinLog:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Log);
- case kTfLiteBuiltinLogicalAnd:
- return VisitLogicalBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLogicalAnd,
- armnn::LogicalBinaryOperation::LogicalAnd);
- case kTfLiteBuiltinLogicalNot:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::LogicalNot);
- case kTfLiteBuiltinLogicalOr:
- return VisitLogicalBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLogicalOr,
- armnn::LogicalBinaryOperation::LogicalOr);
- case kTfLiteBuiltinLogistic:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLogistic);
- case kTfLiteBuiltinLogSoftmax:
- return VisitSoftmaxOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLogSoftmax);
- case kTfLiteBuiltinLstm:
- return VisitLstmOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinLstm);
- case kTfLiteBuiltinMaxPool2d:
- return VisitPooling2dOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMaxPool2d);
- case kTfLiteBuiltinMaximum:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMaximum);
- case kTfLiteBuiltinMean:
- return VisitControlOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMean);
- case kTfLiteBuiltinMinimum:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMinimum);
- case kTfLiteBuiltinMirrorPad:
- return VisitPadOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMirrorPad);
- case kTfLiteBuiltinMul:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinMul);
- case kTfLiteBuiltinNeg:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Neg);
- case kTfLiteBuiltinNotEqual:
- return VisitComparisonOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinNotEqual);
- case kTfLiteBuiltinPack:
- return VisitPackOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinPack);
- case kTfLiteBuiltinPad:
- return VisitPadOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinPad);
- case kTfLiteBuiltinPadv2:
- return VisitPadOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinPadv2);
- case kTfLiteBuiltinPrelu:
- return VisitPreluOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinPrelu);
- case kTfLiteBuiltinQuantize:
- return VisitQuantizeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinQuantize);
- case kTfLiteBuiltinRank:
- return VisitControlOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinRank);
- case kTfLiteBuiltinReduceMax:
- return VisitReduceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinReduceMax);
- case kTfLiteBuiltinReduceMin:
- return VisitReduceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinReduceMin);
- case kTfLiteBuiltinReduceProd:
- return VisitReduceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinReduceProd);
- case kTfLiteBuiltinRelu:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinRelu);
- case kTfLiteBuiltinReluN1To1:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinReluN1To1);
- case kTfLiteBuiltinRelu6:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinRelu6);
- case kTfLiteBuiltinReshape:
- return VisitReshapeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinReshape);
- case kTfLiteBuiltinResizeBilinear:
- return VisitResizeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinResizeBilinear);
- case kTfLiteBuiltinResizeNearestNeighbor:
- return VisitResizeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinResizeNearestNeighbor);
- case kTfLiteBuiltinRsqrt:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Rsqrt);
- case kTfLiteBuiltinShape:
- return VisitShapeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinShape);
- case kTfLiteBuiltinSin:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Sin);
- case kTfLiteBuiltinSplit:
- return VisitSplitOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSplit);
- case kTfLiteBuiltinSplitV:
- return VisitSplitVOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSplitV);
- case kTfLiteBuiltinSqrt:
- return VisitElementwiseUnaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- armnn::UnaryOperation::Sqrt);
- case kTfLiteBuiltinSqueeze:
- return VisitSqueezeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSqueeze);
- case kTfLiteBuiltinSlice:
- return VisitSliceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSlice);
- case kTfLiteBuiltinStridedSlice:
- return VisitStridedSliceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinStridedSlice);
- case kTfLiteBuiltinSum:
- return VisitReduceOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSum);
- case kTfLiteBuiltinTranspose:
- return VisitTransposeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinTranspose);
- case kTfLiteBuiltinTransposeConv:
- return VisitConvolutionOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinTransposeConv);
- case kTfLiteBuiltinSoftmax:
- return VisitSoftmaxOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSoftmax);
- case kTfLiteBuiltinSpaceToBatchNd:
- return VisitSpaceToBatchNdOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSpaceToBatchNd);
- case kTfLiteBuiltinSpaceToDepth:
- return VisitSpaceToDepthOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSpaceToDepth);
- case kTfLiteBuiltinSub:
- return VisitElementwiseBinaryOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinSub);
- case kTfLiteBuiltinTanh:
- return VisitActivationOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinTanh);
- case kTfLiteBuiltinUnidirectionalSequenceLstm:
- return VisitUnidirectionalSequenceLstmOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinUnidirectionalSequenceLstm);
- case kTfLiteBuiltinUnpack:
- return VisitUnpackOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinUnpack);
- default:
- return kTfLiteError;
- }
-}
-
-} // armnnDelegate namespace \ No newline at end of file
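Editor's note: IdentifyOperatorsToDelegate only decides which nodes Arm NN can take; handing that list back to TfLite happens in the delegate's Prepare callback, which falls outside this hunk. The following is a minimal sketch, assuming only the standard TfLite delegate C API, of how such a callback typically consumes a node list like this one. SketchDelegatePrepare and SketchDelegateKernel are hypothetical names, and for brevity the sketch delegates the whole execution plan rather than a filtered list; it is not the delegate's actual implementation.

    #include <tensorflow/lite/c/common.h>

    // Sketch only: hand every node in the execution plan to one delegate kernel.
    // A real delegate would pass a filtered list (as produced above) and would fill
    // in init/free/prepare/invoke on the registration so the replaced subset can
    // actually run, e.g. via an ArmnnSubgraph.
    TfLiteStatus SketchDelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate)
    {
        TfLiteIntArray* executionPlan = nullptr;
        if (context->GetExecutionPlan(context, &executionPlan) != kTfLiteOk)
        {
            return kTfLiteError;
        }

        TfLiteRegistration kernelRegistration{};
        kernelRegistration.custom_name = "SketchDelegateKernel";

        // TfLite partitions the graph and replaces the chosen nodes with this kernel;
        // everything not listed stays on the TfLite runtime.
        return context->ReplaceNodeSubsetsWithDelegateKernels(context,
                                                              kernelRegistration,
                                                              executionPlan,
                                                              delegate);
    }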
diff --git a/delegate/src/armnn_external_delegate.cpp b/delegate/src/armnn_external_delegate.cpp
deleted file mode 100644
index c3875740e1..0000000000
--- a/delegate/src/armnn_external_delegate.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "armnn_delegate.hpp"
-#include <armnn/Logging.hpp>
-#include <armnn/utility/NumericCast.hpp>
-
-#include <iostream>
-#include <tensorflow/lite/minimal_logging.h>
-
-namespace tflite
-{
-
-/**
- * This file defines two symbols that need to be exported to use the TFLite external delegate provider. This is a plugin
- * that can be used for fast integration of delegates into benchmark tests and other tools. It allows loading of
- * a dynamic delegate library at runtime.
- *
- * The external delegate also has TensorFlow Lite Python bindings. Therefore the dynamic external delegate
- * can be used directly with the TensorFlow Lite Python APIs.
- *
- * See tensorflow/lite/delegates/external for details or visit the tensorflow guide
- * [here](https://www.tensorflow.org/lite/performance/implementing_delegate#option_2_leverage_external_delegate)
- */
-
-extern "C"
-{
-
-/**
- * Implementation of the TfLite external delegate plugin
- *
- * For details about what options_keys and option_values are supported please see:
- * armnnDelegate::DelegateOptions::DelegateOptions(char const* const*, char const* const*,size_t,void (*)(const char*))
- */
-TfLiteDelegate* tflite_plugin_create_delegate(char** options_keys,
- char** options_values,
- size_t num_options,
- void (*report_error)(const char*))
-{
-    // Returning null indicates an error during delegate creation, so we initialize with that
- TfLiteDelegate* delegate = nullptr;
- try
- {
- armnnDelegate::DelegateOptions options (options_keys, options_values, num_options, (*report_error));
- delegate = TfLiteArmnnDelegateCreate(options);
- }
- catch (const std::exception& ex)
- {
- if(report_error)
- {
- report_error(ex.what());
- }
- }
- return delegate;
-}
-
-/** Destroy a given delegate plugin
- *
- * @param[in] delegate Delegate to destroy
- */
-void tflite_plugin_destroy_delegate(TfLiteDelegate* delegate)
-{
- armnnDelegate::TfLiteArmnnDelegateDelete(delegate);
-}
-
-} // extern "C"
-} // namespace tflite \ No newline at end of file
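Editor's note: a tool that does not link the delegate directly can load it at runtime through the two exported symbols above. Below is a minimal C++ sketch under stated assumptions: the library name ./libarmnnDelegate.so and the "backends" option key are illustrative values and may differ per build; the symbol names and signatures are the ones defined in the deleted file.

    #include <dlfcn.h>
    #include <cstdio>
    #include <tensorflow/lite/c/common.h>

    // Function pointer types matching the two exported plugin symbols.
    using CreateDelegateFn  = TfLiteDelegate* (*)(char**, char**, size_t, void (*)(const char*));
    using DestroyDelegateFn = void (*)(TfLiteDelegate*);

    int main()
    {
        // Assumed library name and path, for illustration only.
        void* handle = dlopen("./libarmnnDelegate.so", RTLD_NOW);
        if (handle == nullptr)
        {
            std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }

        auto createDelegate  = reinterpret_cast<CreateDelegateFn>(dlsym(handle, "tflite_plugin_create_delegate"));
        auto destroyDelegate = reinterpret_cast<DestroyDelegateFn>(dlsym(handle, "tflite_plugin_destroy_delegate"));
        if (createDelegate == nullptr || destroyDelegate == nullptr)
        {
            std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
            return 1;
        }

        // "backends" is an assumed option key used here for illustration;
        // see DelegateOptions for the keys the delegate actually accepts.
        char* optionKeys[]   = { const_cast<char*>("backends") };
        char* optionValues[] = { const_cast<char*>("CpuAcc,CpuRef") };
        TfLiteDelegate* delegate = createDelegate(optionKeys, optionValues, 1, nullptr);

        // ... pass 'delegate' to Interpreter::ModifyGraphWithDelegate() before running inference ...

        if (delegate != nullptr)
        {
            destroyDelegate(delegate);
        }
        dlclose(handle);
        return 0;
    }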
diff --git a/delegate/src/test/ActivationTest.cpp b/delegate/src/test/ActivationTest.cpp
deleted file mode 100644
index 69041d77a2..0000000000
--- a/delegate/src/test/ActivationTest.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ActivationTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-
-void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- // Calculate output values for input.
- auto f = [](float value)
- {
- return std::fmax(0.0f, value);
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_RELU,
- backends,
- inputData,
- outputExpectedData);
-}
-
-void ActivationBoundedReluTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- const float a = 6.0f;
- const float b = 0.0f;
- // Calculate output values for input.
- auto f = [a, b](float value)
- {
- return std::min(a, std::max(b, value));
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_RELU6,
- backends,
- inputData,
- outputExpectedData);
-}
-
-void ActivationSigmoidTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- // Calculate output values for input.
- auto f = [](float value)
- {
- return 1.0f / (1.0f + std::exp(-value));
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_LOGISTIC,
- backends,
- inputData,
- outputExpectedData);
-}
-
-
-void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- // Calculate output values for input.
- auto f = [](float value)
- {
- return tanhf(value);
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_TANH,
- backends,
- inputData,
- outputExpectedData);
-}
-
-void ActivationEluTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- // Calculate output values for input.
- auto f = [](float value)
- {
- if (value < 0)
- {
- // alpha * (exp(x) - 1)
- return 1 * (std::exp(value) - 1);
- }
- return value;
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_ELU,
- backends,
- inputData,
- outputExpectedData);
-}
-
-void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<float> inputData = {
- -0.1f, -0.2f, -0.3f, -0.4f,
- 0.1f, 0.2f, 0.3f, 0.4f,
- -1.0f, -2.0f, -3.0f, -4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- // Calculate output values for input.
- auto f = [](float x)
- {
- // Break down the calculation to help with verification.
- // hard_swish(x) = x * relu6(x+3) / 6
- // relu6(x) = min(max(x,0),6)
- float reLu6_step1 = std::max((x + 3),0.0f);
- float reLu6Complete = std::min(reLu6_step1, 6.0f);
- float hardSwish_step1 = x * reLu6Complete;
- float result = hardSwish_step1 / 6;
- return result;
- };
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
-
- ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
- backends,
- inputData,
- outputExpectedData);
-}
-
-TEST_SUITE("Activation_CpuRefTests")
-{
-
-TEST_CASE ("Activation_ReLu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ActivationHardSwishTest(backends);
-}
-
-}
-
-TEST_SUITE("Activation_CpuAccTests")
-{
-
-TEST_CASE ("Activation_ReLu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ActivationHardSwishTest(backends);
-}
-
-}
-
-TEST_SUITE("Activation_GpuAccTests")
-{
-
-TEST_CASE ("Activation_ReLu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationReLuTest(backends);
-}
-
-TEST_CASE ("Activation_Bounded_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationBoundedReluTest(backends);
-}
-
-TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationSigmoidTest(backends);
-}
-
-TEST_CASE ("Activation_TanH_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationTanHTest(backends);
-}
-
-TEST_CASE ("Activation_Elu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationEluTest(backends);
-}
-
-TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ActivationHardSwishTest(backends);
-}
-
-}
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
deleted file mode 100644
index 6475083da0..0000000000
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
-
- // create operator
- const std::vector<int> operatorInputs{0};
- const std::vector<int> operatorOutputs{1};
- flatbuffers::Offset <Operator> unaryOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
-
- const std::vector<int> subgraphInputs{0};
- const std::vector<int> subgraphOutputs{1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&unaryOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, activationOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues)
-{
- using namespace tflite;
- std::vector<int32_t> inputShape { { 4, 1, 4} };
- std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
- ::tflite::TensorType_FLOAT32,
- inputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- inputShape,
- expectedOutputValues);
-
- tfLiteInterpreter.reset(nullptr);
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ArgMinMaxTest.cpp b/delegate/src/test/ArgMinMaxTest.cpp
deleted file mode 100644
index bf60a77cb2..0000000000
--- a/delegate/src/test/ArgMinMaxTest.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ArgMinMaxTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void ArgMaxFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 2, 4 };
- std::vector<int32_t> outputShape { 1, 3, 4 };
- std::vector<int32_t> axisShape { 1 };
-
- std::vector<float> inputValues = { 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
-
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
-
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f };
-
- std::vector<int32_t> expectedOutputValues = { 1, 1, 1, 1,
- 1, 1, 1, 1,
- 1, 1, 1, 1 };
-
- ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MAX,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- axisShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- axisValue,
- ::tflite::TensorType_INT32);
-}
-
-void ArgMinFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 2, 4 };
- std::vector<int32_t> outputShape { 1, 3, 2 };
- std::vector<int32_t> axisShape { 1 };
-
- std::vector<float> inputValues = { 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
-
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
-
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f };
-
- std::vector<int32_t> expectedOutputValues = { 0, 0,
- 0, 0,
- 0, 0 };
-
- ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MIN,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- axisShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- axisValue,
- ::tflite::TensorType_INT32);
-}
-
-void ArgMaxUint8Test(std::vector<armnn::BackendId>& backends, int axisValue)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 1, 1, 5 };
- std::vector<int32_t> outputShape { 1, 1, 1 };
- std::vector<int32_t> axisShape { 1 };
-
- std::vector<uint8_t> inputValues = { 5, 2, 8, 10, 9 };
-
- std::vector<int32_t> expectedOutputValues = { 3 };
-
- ArgMinMaxTest<uint8_t, int32_t>(tflite::BuiltinOperator_ARG_MAX,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- axisShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- axisValue,
- ::tflite::TensorType_INT32);
-}
-
-TEST_SUITE("ArgMinMax_CpuRefTests")
-{
-
-TEST_CASE ("ArgMaxFP32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ArgMaxFP32Test(backends, 2);
-}
-
-TEST_CASE ("ArgMinFP32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_CpuRefTests")
-
-TEST_SUITE("ArgMinMax_CpuAccTests")
-{
-
-TEST_CASE ("ArgMaxFP32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ArgMaxFP32Test(backends, 2);
-}
-
-TEST_CASE ("ArgMinFP32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_CpuAccTests")
-
-TEST_SUITE("ArgMinMax_GpuAccTests")
-{
-
-TEST_CASE ("ArgMaxFP32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ArgMaxFP32Test(backends, 2);
-}
-
-TEST_CASE ("ArgMinFP32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ArgMinFP32Test(backends, 3);
-}
-
-TEST_CASE ("ArgMaxUint8Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ArgMaxUint8Test(backends, -1);
-}
-
-} // TEST_SUITE("ArgMinMax_GpuAccTests")
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ArgMinMaxTestHelper.hpp b/delegate/src/test/ArgMinMaxTestHelper.hpp
deleted file mode 100644
index 3e607d6b2b..0000000000
--- a/delegate/src/test/ArgMinMaxTestHelper.hpp
+++ /dev/null
@@ -1,199 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename InputT, typename OutputT>
-std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& axisTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const std::vector<OutputT> axisValue,
- tflite::TensorType outputType,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto inputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- auto axisTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
- axisTensorShape.size()),
- tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("axis"));
-
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- outputType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
- sizeof(OutputT))));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::vector<int32_t> operatorInputs = {{ 0, 1 }};
- std::vector<int> subgraphInputs = {{ 0, 1 }};
-
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ArgMaxOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateArgMaxOptions(flatBufferBuilder, outputType).Union();
-
- if (argMinMaxOperatorCode == tflite::BuiltinOperator_ARG_MIN)
- {
- operatorBuiltinOptionsType = BuiltinOptions_ArgMinOptions;
- operatorBuiltinOptions = CreateArgMinOptions(flatBufferBuilder, outputType).Union();
- }
-
- // create operator
- const std::vector<int32_t> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> argMinMaxOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&argMinMaxOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: ArgMinMax Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- argMinMaxOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename InputT, typename OutputT>
-void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- const std::vector<int32_t>& inputShape,
- const std::vector<int32_t>& axisShape,
- std::vector<int32_t>& outputShape,
- std::vector<InputT>& inputValues,
- std::vector<OutputT>& expectedOutputValues,
- OutputT axisValue,
- tflite::TensorType outputType,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
- tensorType,
- inputShape,
- axisShape,
- outputShape,
- {axisValue},
- outputType,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<InputT>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<InputT>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
- }
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ArmnnDelegateTest.cpp b/delegate/src/test/ArmnnDelegateTest.cpp
deleted file mode 100644
index bc73dde2ef..0000000000
--- a/delegate/src/test/ArmnnDelegateTest.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
-#include <doctest/doctest.h>
-
-#include <armnn_delegate.hpp>
-
-#include "tensorflow/lite/kernels/builtin_op_kernels.h"
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("ArmnnDelegate")
-{
-
-TEST_CASE ("ArmnnDelegate Registered")
-{
- using namespace tflite;
- auto tfLiteInterpreter = std::make_unique<Interpreter>();
-
- tfLiteInterpreter->AddTensors(3);
- tfLiteInterpreter->SetInputs({0, 1});
- tfLiteInterpreter->SetOutputs({2});
-
- tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
- tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
- tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
-
- tflite::ops::builtin::BuiltinOpResolver opResolver;
- const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
- tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);
-
- // Create the Armnn Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<armnn::BackendOptions> backendOptions;
- backendOptions.emplace_back(
- armnn::BackendOptions{ "BackendName",
- {
- { "Option1", 42 },
- { "Option2", true }
- }}
- );
-
- armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
-
- auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
- CHECK(status == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
-}
-
-TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
-{
- using namespace tflite;
- auto tfLiteInterpreter = std::make_unique<Interpreter>();
-
- tfLiteInterpreter->AddTensors(3);
- tfLiteInterpreter->SetInputs({0, 1});
- tfLiteInterpreter->SetOutputs({2});
-
- tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
- tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
- tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
-
- tflite::ops::builtin::BuiltinOpResolver opResolver;
- const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
- tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);
-
- // Create the Armnn Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-
- armnn::OptimizerOptions optimizerOptions(true, true, false, true);
-
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
-
- auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
- CHECK(status == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
-}
-
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/BatchMatMulTest.cpp b/delegate/src/test/BatchMatMulTest.cpp
deleted file mode 100644
index 06ad2c3be2..0000000000
--- a/delegate/src/test/BatchMatMulTest.cpp
+++ /dev/null
@@ -1,689 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "BatchMatMulTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
- void BatchMatMul2DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2, 2 };
- std::vector<int32_t> RHSInputShape { 2, 2 };
- std::vector<int32_t> outputShape { 2, 2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<float> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<float> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
- void BatchMatMul2DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2, 2 };
- std::vector<int32_t> RHSInputShape { 2, 2 };
- std::vector<int32_t> outputShape { 2, 2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<int8_t> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<int8_t> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,2,2 };
- std::vector<int32_t> RHSInputShape { 1,2,2 };
- std::vector<int32_t> outputShape { 1,2,2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<float> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<float> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,2,2 };
- std::vector<int32_t> RHSInputShape { 1,2,2 };
- std::vector<int32_t> outputShape { 1,2,2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<int8_t> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<int8_t> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul4DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,1,2,2 };
- std::vector<int32_t> RHSInputShape { 1,1,2,2 };
- std::vector<int32_t> outputShape { 1,1,2,2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<float> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<float> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul4DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,1,2,2};
- std::vector<int32_t> RHSInputShape { 1,1,2,2 };
- std::vector<int32_t> outputShape { 1,1,2,2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<int8_t> RHSInputValues = { 5, 6,
- 7, 8 };
-
- std::vector<int8_t> expectedOutputValues = { 19, 22,
- 43, 50 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DFp32BatchTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 2,2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<float> RHSInputValues = { 5, 6,
- 7, 8,
-
- 13, 14,
- 15, 16 };
-
- std::vector<float> expectedOutputValues = { 19, 22,
- 43, 50,
-
- 267, 286,
- 323, 346 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DInt8BatchTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 2,2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<int8_t> RHSInputValues = { 5, 6,
- 7, 8,
-
- 1, 2,
- 3, 4 };
-
- std::vector<int8_t> expectedOutputValues = { 19, 22,
- 43, 50,
-
- 39, 58,
- 47, 70 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<float> RHSInputValues = { 13, 14,
- 15, 16 };
-
- std::vector<float> expectedOutputValues = { 43, 46,
- 99, 106,
-
- 267, 286,
- 323, 346 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 1,2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<int8_t> RHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<int8_t> expectedOutputValues = { 7, 10,
- 15, 22,
-
- 39, 58,
- 47, 70 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3D2DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<float> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<float> RHSInputValues = { 13, 14,
- 15, 16 };
-
- std::vector<float> expectedOutputValues = { 43, 46,
- 99, 106,
-
- 267, 286,
- 323, 346 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul3D2DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,2,2 };
- std::vector<int32_t> RHSInputShape { 2,2 };
- std::vector<int32_t> outputShape { 2,2,2 };
-
- std::vector<int8_t> LHSInputValues = { 1, 2,
- 3, 4,
-
- 9, 10,
- 11, 12 };
-
- std::vector<int8_t> RHSInputValues = { 1, 2,
- 3, 4 };
-
- std::vector<int8_t> expectedOutputValues = { 7, 10,
- 15, 22,
-
- 39, 58,
- 47, 70 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul2DFp32TinyTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,1 };
- std::vector<int32_t> RHSInputShape { 1,1 };
- std::vector<int32_t> outputShape { 1,1 };
-
- std::vector<float> LHSInputValues = { 3 };
-
- std::vector<float> RHSInputValues = { 5 };
-
- std::vector<float> expectedOutputValues = { 15 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
- void BatchMatMul2DInt8TinyTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 1,1 };
- std::vector<int32_t> RHSInputShape { 1,1 };
- std::vector<int32_t> outputShape { 1,1 };
-
- std::vector<int8_t> LHSInputValues = { 3 };
-
- std::vector<int8_t> RHSInputValues = { 5 };
-
- std::vector<int8_t> expectedOutputValues = { 15 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMulNonSquareFp32Test(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,5,3 };
- std::vector<int32_t> RHSInputShape { 2,3,4 };
- std::vector<int32_t> outputShape { 2,5,4 };
-
- std::vector<float> LHSInputValues = { 8, 8, 4,
- 6, 1, 3,
- 8, 8, 3,
- 8, 9, 8,
- 5, 4, 4,
-
- 1, 8, 5,
- 7, 1, 1,
- 8, 7, 9,
- 3, 2, 7,
- 8, 5, 3 };
-
- std::vector<float> RHSInputValues = { 6, 2, 3, 2,
- 6, 2, 2, 8,
- 3, 7, 8, 1,
-
- 7, 2, 9, 5,
- 2, 3, 1, 3,
- 2, 7, 7, 5 };
-
- std::vector<float> expectedOutputValues = { 108, 60, 72, 84,
- 51, 35, 44, 23,
- 105, 53, 64, 83,
- 126, 90, 106, 96,
- 66, 46, 55, 46,
-
- 33, 61, 52, 54,
- 53, 24, 71, 43,
- 88, 100, 142, 106,
- 39, 61, 78, 56,
- 72, 52, 98, 70 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMulNonSquareInt8Test(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 2,5,3 };
- std::vector<int32_t> RHSInputShape { 2,3,4 };
- std::vector<int32_t> outputShape { 2,5,4 };
-
- std::vector<int8_t> LHSInputValues = { 8, 8, 4,
- 6, 1, 3,
- 8, 8, 3,
- 8, 9, 8,
- 5, 4, 4,
-
- 1, 8, 5,
- 7, 1, 1,
- 8, 7, 9,
- 3, 2, 7,
- 8, 5, 3 };
-
- std::vector<int8_t> RHSInputValues = { 6, 2, 3, 2,
- 6, 2, 2, 8,
- 3, 7, 8, 1,
-
- 7, 2, 3, 5,
- 2, 3, 1, 3,
- 2, 7, 7, 5 };
-
- std::vector<int8_t> expectedOutputValues = { 108, 60, 72, 84,
- 51, 35, 44, 23,
- 105, 53, 64, 83,
- 126, 90, 106, 96,
- 66, 46, 55, 46,
-
- 33, 61, 46, 54,
- 53, 24, 29, 43,
- 88, 100, 94, 106,
- 39, 61, 60, 56,
- 72, 52, 50, 70 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- false,
- false);
- }
-
- void BatchMatMul2DFp32SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 3,3 };
- std::vector<int32_t> RHSInputShape { 3,3 };
- std::vector<int32_t> outputShape { 3,3 };
-
- std::vector<float> LHSInputValues = { 3, 1, 1,
- 1, 3, -1,
- 2, 4, 1 };
-
- std::vector<float> RHSInputValues = { 1, 0, 0,
- 0, 1, 0,
- 0, 0, 1 };
-
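-        // With adjX set, the LHS is transposed before the multiply; against an identity RHS the result is simply the LHS transposed.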
- std::vector<float> expectedOutputValues = { 3, 1, 2,
- 1, 3, 4,
- 1, -1, 1 };
-
- BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_FLOAT32,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- true,
- false);
- }
-
- void BatchMatMul2DInt8SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
- {
- // Set input data
- std::vector<int32_t> LHSInputShape { 3,3 };
- std::vector<int32_t> RHSInputShape { 3,3 };
- std::vector<int32_t> outputShape { 3,3 };
-
- std::vector<int8_t> LHSInputValues = { 3, 1, 1,
- 1, 3, -1,
- 2, 4, 1 };
-
- std::vector<int8_t> RHSInputValues = { 1, 0, 0,
- 0, 1, 0,
- 0, 0, 1 };
-
- std::vector<int8_t> expectedOutputValues = { 3, 1, 2,
- 1, 3, 4,
- 1, -1, 1 };
-
- BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
- ::tflite::TensorType_INT8,
- backends,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- LHSInputValues,
- RHSInputValues,
- expectedOutputValues,
- true,
- false);
- }
-
- TEST_SUITE("BATCH_MATMUL_CpuRefTests")
- {
- TEST_CASE("BATCH_MATMUL_Fp32_CpuRefTests")
- {
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BatchMatMul2DFp32SimpleTest (backends);
- BatchMatMul3DFp32SimpleTest (backends);
- BatchMatMul4DFp32SimpleTest (backends);
- BatchMatMul3DFp32BatchTest (backends);
- BatchMatMul3DFp32BroadcastTest (backends);
- BatchMatMul3D2DFp32BroadcastTest (backends);
- BatchMatMul2DFp32TinyTest (backends);
- BatchMatMulNonSquareFp32Test (backends);
- BatchMatMul2DFp32SimpleAdjointTest(backends);
- }
-
- TEST_CASE("BATCH_MATMUL_Int8_CpuRefTests")
- {
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BatchMatMul2DInt8SimpleTest (backends);
- BatchMatMul3DInt8SimpleTest (backends);
- BatchMatMul4DInt8SimpleTest (backends);
- BatchMatMul3DInt8BatchTest (backends);
- BatchMatMul3DInt8BroadcastTest (backends);
- BatchMatMul3D2DInt8BroadcastTest (backends);
- BatchMatMul2DInt8TinyTest (backends);
- BatchMatMulNonSquareInt8Test (backends);
- BatchMatMul2DInt8SimpleAdjointTest(backends);
- }
- }
-
- TEST_SUITE("BATCH_MATMUL_CpuAccTests")
- {
- TEST_CASE("BATCH_MATMUL_Fp32_CpuAccTests")
- {
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- BatchMatMul2DFp32SimpleTest (backends);
- BatchMatMul3DFp32SimpleTest (backends);
- BatchMatMul4DFp32SimpleTest (backends);
- BatchMatMul3DFp32BatchTest (backends);
- BatchMatMul3DFp32BroadcastTest (backends);
- BatchMatMul3D2DFp32BroadcastTest (backends);
- BatchMatMul2DFp32TinyTest (backends);
- BatchMatMulNonSquareFp32Test (backends);
- BatchMatMul2DFp32SimpleAdjointTest(backends);
- }
- }
- TEST_SUITE("BATCH_MATMUL_GpuAccTests")
- {
- TEST_CASE("BATCH_MATMUL_Fp32_GpuAccTests")
- {
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- BatchMatMul2DFp32SimpleTest (backends);
- BatchMatMul3DFp32SimpleTest (backends);
- BatchMatMul4DFp32SimpleTest (backends);
- BatchMatMul3DFp32BatchTest (backends);
- BatchMatMul3DFp32BroadcastTest (backends);
- BatchMatMul3D2DFp32BroadcastTest (backends);
- BatchMatMul2DFp32TinyTest (backends);
- BatchMatMulNonSquareFp32Test (backends);
- BatchMatMul2DFp32SimpleAdjointTest(backends);
- }
- }
-}
diff --git a/delegate/src/test/BatchMatMulTestHelper.hpp b/delegate/src/test/BatchMatMulTestHelper.hpp
deleted file mode 100644
index 7437064a42..0000000000
--- a/delegate/src/test/BatchMatMulTestHelper.hpp
+++ /dev/null
@@ -1,208 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateBatchMatMulTfLiteModel(
- tflite::BuiltinOperator bmmOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& LHSInputTensorShape,
- const std::vector <int32_t>& RHSInputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- bool adjX = false,
- bool adjY = false,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
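-    // Buffer 0 is the conventional empty buffer in the TfLite schema; the LHS, RHS and output tensors reference buffers 1-3.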
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
- LHSInputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("LHSInput"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
- RHSInputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("RHSInput"),
- quantizationParameters);
-
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
- adjX,
- adjY).Union();
-
- const std::vector<int32_t> operatorInputs{{0, 1}};
- const std::vector<int32_t> operatorOutputs{2};
- flatbuffers::Offset <Operator> bmmOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{{0, 1}};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
- subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&bmmOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& LHSInputShape,
- std::vector<int32_t>& RHSInputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& LHSInputValues,
- std::vector<T>& RHSInputValues,
- std::vector<T>& expectedOutputValues,
- bool adjX = false,
- bool adjY = false,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
- tensorType,
- LHSInputShape,
- RHSInputShape,
- outputShape,
- adjX,
- adjY,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
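-    // From here on armnnDelegateInterpreter executes through the ArmNN delegate while tfLiteInterpreter runs the
-    // reference TfLite kernels, so the two sets of outputs can be compared below.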
-
- // Set input data
- auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
- auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
- auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
- }
-
- auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
- auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
- for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
- {
- armnnDelegateLHSInputData[i] = LHSInputValues[i];
- }
- for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
- {
- armnnDelegateRHSInputData[i] = RHSInputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
- outputShape, expectedOutputValues);
-}
-
-} // anonymous namespace
-
-
-
-
diff --git a/delegate/src/test/BatchSpaceTest.cpp b/delegate/src/test/BatchSpaceTest.cpp
deleted file mode 100644
index 47eba452e7..0000000000
--- a/delegate/src/test/BatchSpaceTest.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "BatchSpaceTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// BatchToSpaceND Operator
-void BatchToSpaceNDFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 4, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
- std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
-
- std::vector<unsigned int> blockShape({2, 2});
- std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
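-    // A 2x2 block shape folds the 4 input batches back into a single 2x2 spatial grid, so the values pass through unchanged.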
-
- BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- crops,
- expectedOutputValues);
-}
-
-void BatchToSpaceNDFp32BatchOneTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
- std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
-
- std::vector<unsigned int> blockShape({1, 1});
- std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
-
- BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- crops,
- expectedOutputValues);
-}
-
-void BatchToSpaceNDUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 4, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7 };
- std::vector<uint8_t> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7 };
-
- std::vector<unsigned int> blockShape({2, 2});
- std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
-
- BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- crops,
- expectedOutputValues);
-}
-
-// SpaceToBatchND Operator
-void SpaceToBatchNDFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 4, 1, 1, 1 };
-
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
- std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
-
- std::vector<unsigned int> blockShape({2, 2});
- std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {0, 0}};
-
- BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- padding,
- expectedOutputValues);
-}
-
-void SpaceToBatchNDFp32PaddingTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 2, 4, 1 };
- std::vector<int32_t> expectedOutputShape { 8, 1, 3, 1 };
-
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f, 12.0f,
- 13.0f, 14.0f, 15.0f, 16.0f };
-
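-    // Padding of {2, 0} prepends two zero columns to the width dimension before the 2x2 block split,
-    // which is why each output row starts with zeros.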
- std::vector<float> expectedOutputValues { 0.0f, 1.0f, 3.0f, 0.0f, 9.0f, 11.0f,
- 0.0f, 2.0f, 4.0f, 0.0f, 10.0f, 12.0f,
- 0.0f, 5.0f, 7.0f, 0.0f, 13.0f, 15.0f,
- 0.0f, 6.0f, 8.0f, 0.0f, 14.0f, 16.0f };
-
- std::vector<unsigned int> blockShape({2, 2});
- std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {2, 0}};
-
- BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- padding,
- expectedOutputValues);
-}
-
-void SpaceToBatchNDUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 2, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 4, 1, 1, 3 };
-
- std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7 };
- std::vector<uint8_t> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7 };
-
- std::vector<unsigned int> blockShape({2, 2});
- std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {0, 0}};
-
- BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- blockShape,
- padding,
- expectedOutputValues);
-}
-
-// BatchToSpaceND Tests
-TEST_SUITE("BatchToSpaceND_CpuAccTests")
-{
-
-TEST_CASE ("BatchToSpaceND_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- BatchToSpaceNDFp32Test(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- BatchToSpaceNDFp32BatchOneTest(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- BatchToSpaceNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("BatchToSpaceND_GpuAccTests")
-{
-
-TEST_CASE ("BatchToSpaceND_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- BatchToSpaceNDFp32Test(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- BatchToSpaceNDFp32BatchOneTest(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- BatchToSpaceNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("BatchToSpaceND_CpuRefTests")
-{
-
-TEST_CASE ("BatchToSpaceND_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BatchToSpaceNDFp32Test(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BatchToSpaceNDFp32BatchOneTest(backends);
-}
-
-TEST_CASE ("BatchToSpaceND_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BatchToSpaceNDUint8Test(backends);
-}
-
-}
-
-// SpaceToBatchND Tests
-TEST_SUITE("SpaceToBatchND_CpuAccTests")
-{
-
-TEST_CASE ("SpaceToBatchND_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SpaceToBatchNDFp32Test(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SpaceToBatchNDFp32PaddingTest(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SpaceToBatchNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("SpaceToBatchND_GpuAccTests")
-{
-
-TEST_CASE ("SpaceToBatchND_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SpaceToBatchNDFp32Test(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SpaceToBatchNDFp32PaddingTest(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SpaceToBatchNDUint8Test(backends);
-}
-
-}
-
-TEST_SUITE("SpaceToBatchND_CpuRefTests")
-{
-
-TEST_CASE ("SpaceToBatchND_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SpaceToBatchNDFp32Test(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SpaceToBatchNDFp32PaddingTest(backends);
-}
-
-TEST_CASE ("SpaceToBatchND_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SpaceToBatchNDUint8Test(backends);
-}
-
-}
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/BatchSpaceTestHelper.hpp b/delegate/src/test/BatchSpaceTestHelper.hpp
deleted file mode 100644
index d4fa9837e8..0000000000
--- a/delegate/src/test/BatchSpaceTestHelper.hpp
+++ /dev/null
@@ -1,218 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpaceOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& inputTensorShape,
- std::vector <int32_t>& outputTensorShape,
- std::vector<unsigned int>& blockData,
- std::vector<std::pair<unsigned int, unsigned int>>& cropsPadData,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder);
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(blockData.data()),
- sizeof(int32_t) * blockData.size()));
- buffers[3] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cropsPadData.data()),
- sizeof(int64_t) * cropsPadData.size()));
- buffers[4] = CreateBuffer(flatBufferBuilder);
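-    // Buffers 2 and 3 hold the constant block-shape and crops/padding data; buffers 0, 1 and 4 are left empty.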
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::string cropsOrPadding =
- batchSpaceOperatorCode == tflite::BuiltinOperator_BATCH_TO_SPACE_ND ? "crops" : "padding";
-
- std::vector<int32_t> blockShape { 2 };
- std::vector<int32_t> cropsOrPaddingShape { 2, 2 };
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(blockShape.data(),
- blockShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("block"),
- quantizationParameters);
-
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(cropsOrPaddingShape.data(),
- cropsOrPaddingShape.size()),
- ::tflite::TensorType_INT32,
- 3,
- flatBufferBuilder.CreateString(cropsOrPadding),
- quantizationParameters);
-
- // Create output tensor
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 4,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // Create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
- flatbuffers::Offset<void> operatorBuiltinOptions = 0;
- switch (batchSpaceOperatorCode)
- {
- case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
- {
- operatorBuiltinOptionsType = tflite::BuiltinOptions_BatchToSpaceNDOptions;
- operatorBuiltinOptions = CreateBatchToSpaceNDOptions(flatBufferBuilder).Union();
- break;
- }
- case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
- {
- operatorBuiltinOptionsType = tflite::BuiltinOptions_SpaceToBatchNDOptions;
- operatorBuiltinOptions = CreateSpaceToBatchNDOptions(flatBufferBuilder).Union();
- break;
- }
- default:
- break;
- }
-
- const std::vector<int> operatorInputs{ {0, 1, 2} };
- const std::vector<int> operatorOutputs{ 3 };
- flatbuffers::Offset <Operator> batchSpaceOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1, 2} };
- const std::vector<int> subgraphOutputs{ 3 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&batchSpaceOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: BatchSpace Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, batchSpaceOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void BatchSpaceTest(tflite::BuiltinOperator controlOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& inputValues,
- std::vector<unsigned int>& blockShapeValues,
- std::vector<std::pair<unsigned int, unsigned int>>& cropsPaddingValues,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode,
- tensorType,
- inputShape,
- expectedOutputShape,
- blockShapeValues,
- cropsPaddingValues,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/CastTest.cpp b/delegate/src/test/CastTest.cpp
deleted file mode 100644
index a637071ffc..0000000000
--- a/delegate/src/test/CastTest.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CastTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void CastUint8ToFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape {1, 3, 2, 3};
-
- std::vector<uint8_t> inputValues { 1, 3, 1, 3, 1, 3, 1, 3, 1,
- 3, 1, 3, 1, 2, 1, 3, 1, 3 };
-
- std::vector<float> expectedOutputValues { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
- 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
-
- CastTest<uint8_t, float>(::tflite::TensorType_UINT8,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void CastInt32ToFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape {1, 3, 2, 3};
-
- std::vector<int32_t> inputValues { -1, -3, -1, -3, -1, -3, -1, -3, 1,
- 3, 1, 3, 1, 2, 1, 3, 1, 3 };
-
- std::vector<float> expectedOutputValues { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
- 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
-
- CastTest<int32_t, float>(::tflite::TensorType_INT32,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- inputValues,
- expectedOutputValues);
-}
-
-// CAST Test Suite
-TEST_SUITE("CAST_CpuRefTests")
-{
-
-TEST_CASE ("CAST_UINT8_TO_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- CastUint8ToFp32Test(backends);
-}
-
-TEST_CASE ("CAST_INT32_TO_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- CastInt32ToFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("CAST_CpuAccTests")
-{
-
-TEST_CASE ("CAST_INT32_TO_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- CastInt32ToFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("CAST_GpuAccTests")
-{
-
-TEST_CASE ("CAST_INT32_TO_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- CastInt32ToFp32Test(backends);
-}
-
-}
-// End of CAST Test Suite
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/CastTestHelper.hpp b/delegate/src/test/CastTestHelper.hpp
deleted file mode 100644
index 0448e65856..0000000000
--- a/delegate/src/test/CastTestHelper.hpp
+++ /dev/null
@@ -1,159 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- const std::vector <int32_t>& tensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
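-    // CAST is element-wise, so the input and output tensors share the same shape and differ only in data type.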
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- inputTensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- outputTensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- const std::vector<int32_t> operatorInputs({0});
- const std::vector<int32_t> operatorOutputs({1});
-
- flatbuffers::Offset<Operator> castOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_CastOptions,
- CreateCastOptions(flatBufferBuilder).Union());
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: CAST Operator Model");
- flatbuffers::Offset<OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CAST);
-
- const std::vector<int32_t> subgraphInputs({0});
- const std::vector<int32_t> subgraphOutputs({1});
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&castOperator, 1));
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T, typename K>
-void CastTest(tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& shape,
- std::vector<T>& inputValues,
- std::vector<K>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
- outputTensorType,
- shape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the armnnDelegate interpreter to use the ArmNN delegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
- armnnDelegate,
- shape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/ComparisonTest.cpp b/delegate/src/test/ComparisonTest.cpp
deleted file mode 100644
index 95bfe21d27..0000000000
--- a/delegate/src/test/ComparisonTest.cpp
+++ /dev/null
@@ -1,844 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ComparisonTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void EqualFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
- 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<float> input1Values =
- {
- 1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
- 5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<bool> expectedOutputValues =
- {
- 1, 1, 1, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 1
- };
-
-
- ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void EqualBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
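-    // input1 is broadcast over input0's 2x2 spatial positions, so each group of three values is compared against { 4.f, 5.f, 6.f }.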
- // Set output data
- std::vector<bool> expectedOutputValues
- {
- 0, 0, 0, 1, 1, 1,
- 0, 0, 0, 0, 0, 0
- };
- ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void EqualInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { 1, 0, 0, 1 };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_EQUAL,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void NotEqualFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
- 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<float> input1Values =
- {
- 1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
- 5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<bool> expectedOutputValues =
- {
- 0, 0, 0, 0, 1, 1, 1, 1,
- 1, 1, 1, 1, 0, 0, 0, 0
- };
-
- ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void NotEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
- // Set output data
- std::vector<bool> expectedOutputValues
- {
- 1, 1, 1, 0, 0, 0,
- 1, 1, 1, 1, 1, 1
- };
- ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void NotEqualInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { 0, 1, 1, 0 };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_NOT_EQUAL,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values = { 1, 5, 6, 4 };
-
- std::vector<float> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
-
- ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
-
- std::vector<bool> expectedOutputValues
- {
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1
- };
- ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterEqualFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
-
- std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
-
- std::vector<bool> expectedOutputValues = { true, true, false, true };
-
- ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
- // Set output data
- std::vector<bool> expectedOutputValues
- {
- 0, 0, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1
- };
-
- ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void GreaterEqualInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { 1, 1, 0, 0 };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER_EQUAL,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
-
- std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
-
- std::vector<bool> expectedOutputValues = { false, false, true, false };
-
- ComparisonTest<float>(tflite::BuiltinOperator_LESS,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
-
- std::vector<bool> expectedOutputValues
- {
- true, true, true, false, false, false,
- false, false, false, false, false, false
- };
-
- ComparisonTest<float>(tflite::BuiltinOperator_LESS,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { false, false, true, true };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessEqualFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
-
- std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
-
- std::vector<bool> expectedOutputValues = { true, false, true, true };
-
- ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<float> input0Values
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f,
- 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
- };
- std::vector<float> input1Values { 4.f, 5.f, 6.f };
-
- std::vector<bool> expectedOutputValues
- {
- true, true, true, true, true, true,
- false, false, false, false, false, false
- };
-
- ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LessEqualInt32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
-
- std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
-
- std::vector<bool> expectedOutputValues = { true, false, true, true };
-
- ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS_EQUAL,
- ::tflite::TensorType_INT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-TEST_SUITE("Comparison_CpuRefTests")
-{
-
-TEST_CASE ("EQUAL_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- EqualFP32Test(backends);
-}
-
-TEST_CASE ("EQUAL_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- EqualBroadcastTest(backends);
-}
-
-TEST_CASE ("EQUAL_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- EqualInt32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- NotEqualFP32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- NotEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- NotEqualInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterEqualFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- GreaterEqualInt32Test(backends);
-}
-
-TEST_CASE ("LESS_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessFP32Test(backends);
-}
-
-TEST_CASE ("LESS_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessInt32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessEqualFP32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_INT32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LessEqualInt32Test(backends);
-}
-} // End TEST_SUITE("Comparison_CpuRefTests")
-
-
-
-TEST_SUITE("Comparison_GpuAccTests")
-{
-
-TEST_CASE ("EQUAL_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- EqualFP32Test(backends);
-}
-
-TEST_CASE ("EQUAL_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- EqualBroadcastTest(backends);
-}
-
-TEST_CASE ("EQUAL_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- EqualInt32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- NotEqualFP32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- NotEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- NotEqualInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- GreaterFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- GreaterBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- GreaterInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- GreaterEqualFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- GreaterEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- GreaterEqualInt32Test(backends);
-}
-
-TEST_CASE ("LESS_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessFP32Test(backends);
-}
-
-TEST_CASE ("LESS_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessInt32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessEqualFP32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_INT32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LessEqualInt32Test(backends);
-}
-
-} // End TEST_SUITE("Comparison_GpuAccTests")
-
-
-TEST_SUITE("Comparison_CpuAccTests")
-{
-
-TEST_CASE ("EQUAL_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- EqualFP32Test(backends);
-}
-
-TEST_CASE ("EQUAL_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- EqualBroadcastTest(backends);
-}
-
-TEST_CASE ("EQUAL_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- EqualInt32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- NotEqualFP32Test(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- NotEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("NOT_EQUAL_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- NotEqualInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterInt32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterEqualFP32Test(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("GREATER_EQUAL_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- GreaterEqualInt32Test(backends);
-}
-
-TEST_CASE ("LESS_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessFP32Test(backends);
-}
-
-TEST_CASE ("LESS_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessInt32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessEqualFP32Test(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessEqualBroadcastTest(backends);
-}
-
-TEST_CASE ("LESS_EQUAL_INT32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LessEqualInt32Test(backends);
-}
-
-} // End TEST_SUITE("Comparison_CpuAccTests")
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ComparisonTestHelper.hpp b/delegate/src/test/ComparisonTestHelper.hpp
deleted file mode 100644
index db337f9f8a..0000000000
--- a/delegate/src/test/ComparisonTestHelper.hpp
+++ /dev/null
@@ -1,238 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparisonOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& input0TensorShape,
- const std::vector <int32_t>& input1TensorShape,
- const std::vector <int32_t>& outputTensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
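- // The first buffer is the empty sentinel buffer required by the TFLite schema; the remaining
- // buffers back the two input tensors and the output tensor (none of them hold constant data here).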
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
- input1TensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("input_1"),
- quantizationParameters);
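- // The output tensor is always created as TensorType_BOOL: TfLite comparison operators
- // produce boolean results regardless of the input tensor type.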
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- ::tflite::TensorType_BOOL,
- 3);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
- switch (comparisonOperatorCode)
- {
- case BuiltinOperator_EQUAL:
- {
- operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
- operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_NOT_EQUAL:
- {
- operatorBuiltinOptionsType = BuiltinOptions_NotEqualOptions;
- operatorBuiltinOptions = CreateNotEqualOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_GREATER:
- {
- operatorBuiltinOptionsType = BuiltinOptions_GreaterOptions;
- operatorBuiltinOptions = CreateGreaterOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_GREATER_EQUAL:
- {
- operatorBuiltinOptionsType = BuiltinOptions_GreaterEqualOptions;
- operatorBuiltinOptions = CreateGreaterEqualOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_LESS:
- {
- operatorBuiltinOptionsType = BuiltinOptions_LessOptions;
- operatorBuiltinOptions = CreateLessOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_LESS_EQUAL:
- {
- operatorBuiltinOptionsType = BuiltinOptions_LessEqualOptions;
- operatorBuiltinOptions = CreateLessEqualOptions(flatBufferBuilder).Union();
- break;
- }
- default:
- break;
- }
- const std::vector<int32_t> operatorInputs{0, 1};
- const std::vector<int32_t> operatorOutputs{2};
- flatbuffers::Offset <Operator> comparisonOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&comparisonOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Comparison Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, comparisonOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& input0Shape,
- std::vector<int32_t>& input1Shape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& input0Values,
- std::vector<T>& input1Values,
- std::vector<bool>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
- tensorType,
- input0Shape,
- input1Shape,
- outputShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- tfLiteDelegateInput0Data[i] = input0Values[i];
- }
-
- auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
- auto tfLiteDelegateInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
- for (unsigned int i = 0; i < input1Values.size(); ++i)
- {
- tfLiteDelegateInput1Data[i] = input1Values[i];
- }
-
- auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- armnnDelegateInput0Data[i] = input0Values[i];
- }
-
- auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
- for (unsigned int i = 0; i < input1Values.size(); ++i)
- {
- armnnDelegateInput1Data[i] = input1Values[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ControlTest.cpp b/delegate/src/test/ControlTest.cpp
deleted file mode 100644
index 18bbc5a9a8..0000000000
--- a/delegate/src/test/ControlTest.cpp
+++ /dev/null
@@ -1,420 +0,0 @@
-//
-// Copyright © 2020,2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ControlTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// CONCATENATION Operator
-void ConcatUint8TwoInputsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 2 };
- std::vector<int32_t> expectedOutputShape { 4, 2 };
-
- // Set input and output data
- std::vector<std::vector<uint8_t>> inputValues;
- std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 }; // Lower bounds
- std::vector<uint8_t> inputValue2 { 252, 253, 254, 255 }; // Upper bounds
- inputValues.push_back(inputValue1);
- inputValues.push_back(inputValue2);
-
- std::vector<uint8_t> expectedOutputValues { 0, 1, 2, 3, 252, 253, 254, 255 };
-
- ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void ConcatInt16TwoInputsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 2 };
- std::vector<int32_t> expectedOutputShape { 4, 2 };
-
- std::vector<std::vector<int16_t>> inputValues;
- std::vector<int16_t> inputValue1 { -32768, -16384, -1, 0 };
- std::vector<int16_t> inputValue2 { 1, 2, 16384, 32767 };
- inputValues.push_back(inputValue1);
- inputValues.push_back(inputValue2);
-
- std::vector<int16_t> expectedOutputValues { -32768, -16384, -1, 0, 1, 2, 16384, 32767};
-
- ConcatenationTest<int16_t>(tflite::BuiltinOperator_CONCATENATION,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void ConcatFloat32TwoInputsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 2 };
- std::vector<int32_t> expectedOutputShape { 4, 2 };
-
- std::vector<std::vector<float>> inputValues;
- std::vector<float> inputValue1 { -127.f, -126.f, -1.f, 0.f };
- std::vector<float> inputValue2 { 1.f, 2.f, 126.f, 127.f };
- inputValues.push_back(inputValue1);
- inputValues.push_back(inputValue2);
-
- std::vector<float> expectedOutputValues { -127.f, -126.f, -1.f, 0.f, 1.f, 2.f, 126.f, 127.f };
-
- ConcatenationTest<float>(tflite::BuiltinOperator_CONCATENATION,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void ConcatThreeInputsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 2 };
- std::vector<int32_t> expectedOutputShape { 6, 2 };
-
- std::vector<std::vector<uint8_t>> inputValues;
- std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 };
- std::vector<uint8_t> inputValue2 { 125, 126, 127, 128 };
- std::vector<uint8_t> inputValue3 { 252, 253, 254, 255 };
- inputValues.push_back(inputValue1);
- inputValues.push_back(inputValue2);
- inputValues.push_back(inputValue3);
-
- std::vector<uint8_t> expectedOutputValues { 0, 1, 2, 3, 125, 126, 127, 128, 252, 253, 254, 255 };
-
- ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void ConcatAxisTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 4 };
-
- std::vector<std::vector<uint8_t>> inputValues;
- std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 };
- std::vector<uint8_t> inputValue3 { 252, 253, 254, 255 };
- inputValues.push_back(inputValue1);
- inputValues.push_back(inputValue3);
-
- std::vector<uint8_t> expectedOutputValues { 0, 1, 252, 253, 2, 3, 254, 255 };
-
- ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 2);
-}
-
-// MEAN Operator
-void MeanUint8KeepDimsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 3 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1 };
-
- std::vector<uint8_t> input0Values { 5, 10, 15 }; // Inputs
- std::vector<int32_t> input1Values { 1 }; // Axis
-
- std::vector<uint8_t> expectedOutputValues { 10 };
-
- MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- true);
-}
-
-void MeanUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 2, 2 };
-
- std::vector<uint8_t> input0Values { 5, 10, 15, 20 }; // Inputs
- std::vector<int32_t> input1Values { 0 }; // Axis
-
- std::vector<uint8_t> expectedOutputValues { 5, 10, 15, 20 };
-
- MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- false);
-}
-
-void MeanFp32KeepDimsTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 2 };
-
- std::vector<float> input0Values { 1.0f, 1.5f, 2.0f, 2.5f }; // Inputs
- std::vector<int32_t> input1Values { 1 }; // Axis
-
- std::vector<float> expectedOutputValues { 1.5f, 2.0f };
-
- MeanTest<float>(tflite::BuiltinOperator_MEAN,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- true);
-}
-
-void MeanFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 1 };
-
- std::vector<float> input0Values { 1.0f, 1.5f, 2.0f, 2.5f }; // Inputs
- std::vector<int32_t> input1Values { 2 }; // Axis
-
- std::vector<float> expectedOutputValues { 1.25f, 2.25f };
-
- MeanTest<float>(tflite::BuiltinOperator_MEAN,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- false);
-}
-
-// CONCATENATION Tests.
-TEST_SUITE("Concatenation_CpuAccTests")
-{
-
-TEST_CASE ("Concatenation_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- ConcatUint8TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Int16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- ConcatInt16TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- ConcatFloat32TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Three_Inputs_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- ConcatThreeInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Axis_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- ConcatAxisTest(backends);
-}
-
-}
-
-TEST_SUITE("Concatenation_GpuAccTests")
-{
-
-TEST_CASE ("Concatenation_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConcatUint8TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Int16_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConcatInt16TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConcatFloat32TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Three_Inputs_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConcatThreeInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Axis_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConcatAxisTest(backends);
-}
-
-}
-
-TEST_SUITE("Concatenation_CpuRefTests")
-{
-
-TEST_CASE ("Concatenation_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- ConcatUint8TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Int16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- ConcatInt16TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Float32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- ConcatFloat32TwoInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Three_Inputs_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- ConcatThreeInputsTest(backends);
-}
-
-TEST_CASE ("Concatenation_Axis_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- ConcatAxisTest(backends);
-}
-
-}
-
-// MEAN Tests
-TEST_SUITE("Mean_CpuAccTests")
-{
-
-TEST_CASE ("Mean_Uint8_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- MeanUint8KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- MeanUint8Test(backends);
-}
-
-TEST_CASE ("Mean_Fp32_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- MeanFp32KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- MeanFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("Mean_GpuAccTests")
-{
-
-TEST_CASE ("Mean_Uint8_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- MeanUint8KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- MeanUint8Test(backends);
-}
-
-TEST_CASE ("Mean_Fp32_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- MeanFp32KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- MeanFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("Mean_CpuRefTests")
-{
-
-TEST_CASE ("Mean_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- MeanUint8KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- MeanUint8Test(backends);
-}
-
-TEST_CASE ("Mean_Fp32_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- MeanFp32KeepDimsTest(backends);
-}
-
-TEST_CASE ("Mean_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- MeanFp32Test(backends);
-}
-
-}
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ControlTestHelper.hpp b/delegate/src/test/ControlTestHelper.hpp
deleted file mode 100644
index 3e427e60c5..0000000000
--- a/delegate/src/test/ControlTestHelper.hpp
+++ /dev/null
@@ -1,346 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const int32_t inputTensorNum,
- int32_t axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::vector<int32_t> operatorInputs{};
- const std::vector<int32_t> operatorOutputs{inputTensorNum};
- std::vector<int> subgraphInputs{};
- const std::vector<int> subgraphOutputs{inputTensorNum};
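- // Tensors 0 .. inputTensorNum-1 are the operator/subgraph inputs; tensor inputTensorNum is the single output.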
-
- std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
- for (int i = 0; i < inputTensorNum; ++i)
- {
- tensors[i] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input" + std::to_string(i)),
- quantizationParameters);
-
- // Add number of inputs to vector.
- operatorInputs.push_back(i);
- subgraphInputs.push_back(i);
- }
-
- // Create output tensor
- tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ConcatenationOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateConcatenationOptions(flatBufferBuilder, axis).Union();
-
- flatbuffers::Offset <Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Concatenation Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& input0TensorShape,
- std::vector<int32_t>& input1TensorShape,
- const std::vector <int32_t>& outputTensorShape,
- std::vector<int32_t>& axisData,
- const bool keepDims,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size()));
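- // Buffer 1 holds the raw axis values, so the MEAN operator's axis input is a constant tensor in the model.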
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
- input1TensorShape.size()),
- ::tflite::TensorType_INT32,
- 1,
- flatBufferBuilder.CreateString("axis"),
- quantizationParameters);
-
- // Create output tensor
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator. Mean uses ReducerOptions.
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();
-
- const std::vector<int> operatorInputs{ {0, 1} };
- const std::vector<int> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1} };
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Mean Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShapes,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<std::vector<T>>& inputValues,
- std::vector<T>& expectedOutputValues,
- int32_t axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
- tensorType,
- inputShapes,
- expectedOutputShape,
- inputValues.size(),
- axis,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for all input tensors.
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- // Get single input tensor and assign to interpreters.
- auto inputTensorValues = inputValues[i];
- armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-template <typename T>
-void MeanTest(tflite::BuiltinOperator controlOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& input0Shape,
- std::vector<int32_t>& input1Shape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& input0Values,
- std::vector<int32_t>& input1Values,
- std::vector<T>& expectedOutputValues,
- const bool keepDims,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
- tensorType,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input1Values,
- keepDims,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/Convolution2dTest.cpp b/delegate/src/test/Convolution2dTest.cpp
deleted file mode 100644
index 10510792a1..0000000000
--- a/delegate/src/test/Convolution2dTest.cpp
+++ /dev/null
@@ -1,489 +0,0 @@
-//
-// Copyright © 2020,2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvolutionTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 5, 5, 1 };
- std::vector<int32_t> filterShape { 1, 3, 3, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- static std::vector<float> inputValues =
- {
- 1, 5, 2, 3, 5,
- 8, 7, 3, 6, 3,
- 3, 3, 9, 1, 9,
- 4, 1, 8, 1, 3,
- 6, 8, 1, 9, 2
- };
-
- std::vector<float> filterValues =
- {
- 4, 5, 6,
- 0, 0, 0,
- 3, 2, 1
- };
-
- std::vector<float> biasValues = { 0 };
-
- std::vector<float> expectedOutputValues =
- {
- 23, 33, 24,
- 91, 99, 48,
- 26, 50, 19
- };
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_FLOAT32,
- 2, // strideX
- 2, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
-
- std::vector<int8_t> filterValues = { 2, 1, 0, 6 };
-
- std::vector<int32_t> biasValues = { 10 };
-
- std::vector<int8_t> expectedOutputValues =
- {
- (1 * 2 + 2 * 1 + 3 * 0 + 4 * 6 + 10) / 2, // 19
- (2 * 2 + 0 * 1 + 4 * 0 + 0 * 6 + 10) / 2, // 7
- (3 * 2 + 4 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 10
- (4 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 9
- };
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- static std::vector<uint8_t> inputValues = { 1, 2, 4, 8 };
-
- std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };
-
- std::vector<int32_t> biasValues = { 16 };
-
- // factors to consider:
- // - the filter zero point is non zero, hence the (x-fz)
- // - the output scale is 2 hence the /2
- // - output zero point is non zero, hence the +outZero
- // - RELU cuts negative values and then we add the output zero point
- uint8_t bias = 16;
- uint8_t outZero = 20;
- uint8_t fz = 4; // filter zero point
-
- std::vector<uint8_t> expectedOutputValues =
- {
- std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
- std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
- std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
- std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
- };
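- // For reference, the first output value works out as:
- // (1*(2-4) + 2*(1-4) + 4*(0-4) + 8*(6-4) + 16) / 2 + 20 = 8 / 2 + 20 = 24, and max(20, 24) = 24.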
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_UINT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_RELU,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- {1.0f}, // biasScale
- {0}, // biasOffset
- {1.0f}, // filterScale
- {4}, // filterOffsets
- 2, // output scale
- 20); // output offset
-}
-
-void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- static std::vector<uint8_t> inputValues = { 1, 2, 4, 1 };
-
- std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };
-
- std::vector<int32_t> biasValues = { 0 };
-
- // factors to consider:
- // - the output scale is 2 hence the /2
- // - RELU6 cuts output values at +6
- uint8_t relu6Min = 6 / 2; // divide by output scale
-
- std::vector<uint8_t> expectedOutputValues =
- {
- std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 2 * 1 + 4 * 0 + 1 * 6) / 2)),
- std::min(relu6Min, static_cast<uint8_t>((2 * 2 + 0 * 1 + 1 * 0 + 0 * 6) / 2)),
- std::min(relu6Min, static_cast<uint8_t>((4 * 2 + 1 * 1 + 0 * 0 + 0 * 6) / 2)),
- std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 0 * 1 + 0 * 0 + 0 * 6) / 2))
- };
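- // For reference, the first output value is (1*2 + 2*1 + 4*0 + 1*6) / 2 = 5, which RELU6 clamps to 6 / 2 = 3.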
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_UINT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_RELU6,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-
-void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1,4,4,2 };
- std::vector<int32_t> filterShape { 4,2,2,2 };
- std::vector<int32_t> biasShape { 4 };
- std::vector<int32_t> outputShape { 1,4,4,4 };
-
- static std::vector<int8_t> inputValues =
- {
- -11, 40,-26, 11,-28, 8, 0, -8,
- -10, 34, 47, 0,-33,-14, 28, 35,
- 6,-28,-26, 8, 13, 33,-31,-41,
- 31,-20,-31,-16, 8,-18,-44, 0
- };
-
- std::vector<float> filterScales = { 1.858268, 2.0, 1.992126, 1.905512 };
- int32_t filterQuantizationDim = 0;
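- // Per-channel quantization: one filter scale per output channel, applied along dimension 0
- // of the OHWI-shaped filter { 4, 2, 2, 2 }.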
- std::vector<int8_t> filterValues =
- {
- 13,-44, 5,-14, 21,-45, 36,-25,
- -42, -2, 24,-30,-31, 35, 43,-30,
- -20, -5, 25, 17, 18, 20, 4,-46,
- -49, 9, -3,-20, 46, 5, 7,-15
- };
-
- std::vector<int32_t> biasValues = { 0,0,0,0 };
- std::vector<float> biasScales = { 0.721445, 0.7764700055, 0.773414, 0.739787 };
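- // Each bias scale is the input scale multiplied by the corresponding per-channel filter scale,
- // e.g. 0.388235 * 1.858268 ~= 0.721445.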
-
- std::vector<int8_t> expectedOutputValues =
- {
- -1, 9, 3, 5, 1, -1, 5, 9,
- 2, 7, -1, 2, 2, 4, 5, 6,
- 1, 1, 4, 4, 2, 0, -4, -3,
- 0, 6, 12, 6, 3, 0, -1, -2,
- 7, -4, 4, 4, 3, 6, 6, 2,
- 0, -3, -1, 4, 4, 8, 3, 1,
- 5, 0, 0, 1, 4, 7, 4, 6,
- 4, 0, 1, 2, 2, 7, 5, 7
- };
- float outputQuantScale = 401.960785f;
- int outputQuantOffset = 3;
- float inputQuantScale = 0.388235f;
- int inputQuantOffset = 1;
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- biasScales,
- {0,0,0,0},
- filterScales,
- {0,0,0,0},
- outputQuantScale,
- outputQuantOffset,
- inputQuantScale,
- inputQuantOffset,
- 1, // depth_multiplier is ignored for conv2d, so its value doesn't matter
- filterQuantizationDim);
-}
-
-TEST_SUITE("Convolution2dTest_CpuRefTests")
-{
-
-TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv2DWithBiasesFp32Test(backends);
-}
-
-TEST_CASE ("Conv2DWithBiases_Int8_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv2DWithBiasesInt8Test(backends);
-}
-
-TEST_CASE ("Conv2DPerChannel_Int8_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv2DPerChannelInt8Test(backends);
-}
-
-} //End of TEST_SUITE("Convolution2dTest_CpuRef")
-
-TEST_SUITE("Convolution2dTest_CpuAccTests")
-{
-
-TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv2DWithBiasesFp32Test(backends);
-}
-
-TEST_CASE ("Conv2DWithBiases_Int8_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv2DWithBiasesInt8Test(backends);
-}
-
-TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv2DPerChannelInt8Test(backends);
-}
-
-} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
-
-TEST_SUITE("Convolution2dTest_GpuAccTests")
-{
-
-TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv2DWithBiasesFp32Test(backends);
-}
-
-TEST_CASE ("Conv2DWithBiases_Int8_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv2DWithBiasesInt8Test(backends);
-}
-
-TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv2DPerChannelInt8Test(backends);
-}
-
-} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
-
-void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> transposeTensorShape { 4 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
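- // For TFLite TRANSPOSE_CONV the first operator input is the requested output shape, here 1x3x3x1.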
- static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
- std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
- std::vector<int8_t> expectedOutputValues =
- {
- 0, 1, 2,
- 2, 11, 12,
- 6, 20, 16
- };
-
- tflite::Padding padding = tflite::Padding_VALID;
- TransposeConvTest<int8_t>(backends,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- padding,
- transposeTensorShape,
- filterShape,
- inputShape,
- outputShape,
- transposeData,
- filterValues,
- inputValues,
- expectedOutputValues);
-}
-
-void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> transposeTensorShape { 4 };
- std::vector<int32_t> filterShape { 1, 2, 2, 1 };
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
- static std::vector<float> inputValues = { 1, 2, 3, 4 };
- std::vector<float> filterValues = { 0, 1, 2, 4 };
- std::vector<float> expectedOutputValues =
- {
- 0, 1, 2,
- 2, 11, 12,
- 6, 20, 16
- };
-
- tflite::Padding padding = tflite::Padding_VALID;
- TransposeConvTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
- 1, // strideX
- 1, // strideY
- padding,
- transposeTensorShape,
- filterShape,
- inputShape,
- outputShape,
- transposeData,
- filterValues,
- inputValues,
- expectedOutputValues);
-}
-
-TEST_SUITE("TransposeConv_CpuRef_Test")
-{
-
-TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_CpuRef_Test)
-
-TEST_SUITE("TransposeConv_CpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)
-
-TEST_SUITE("TransposeConv_GpuAcc_Test")
-{
-
-TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeConvFp32Test(backends);
-}
-
-TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeConvInt8Test(backends);
-}
-
-} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/Convolution3dTest.cpp b/delegate/src/test/Convolution3dTest.cpp
deleted file mode 100644
index 06883f186d..0000000000
--- a/delegate/src/test/Convolution3dTest.cpp
+++ /dev/null
@@ -1,318 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvolutionTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
- // Conv3d currently only supports Float32 inputs, filters, biases and outputs in TFLite.
-// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
-#if defined(ARMNN_POST_TFLITE_2_5)
-
- // Create a vector of 'size' values counting up from 0, each divided by 'divisor' to produce smaller floating point values.
-template <typename T>
-std::vector<T> CreateFloatData(int32_t size, float divisor)
-{
- std::vector<T> data;
- for (int32_t i = 0; i < size; ++i)
- {
- float value = static_cast<float>(i);
- data.push_back(static_cast<T>(value / divisor));
- }
- return data;
-}
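- // e.g. CreateFloatData<float>(6, 2.0f) returns { 0.0f, 0.5f, 1.0f, 1.5f, 2.0f, 2.5f }.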
-
-void Conv3DWithBiasesSimpleWithPaddingFp32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 2, 1 };
- std::vector<int32_t> filterShape { 2, 2, 2, 1, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 2, 1 };
-
- static std::vector<float> inputValues =
- {
- 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f
- };
-
- std::vector<float> filterValues =
- {
- 2.f,1.f, 1.f,0.f, 0.f,1.f, 1.f,1.f
- };
-
- std::vector<float> biasValues = { 5.f };
-
- std::vector<float> expectedOutputValues =
- {
- 33.f, 21.f, 23.f, 13.f, 28.f, 25.f, 27.f, 21.f
- };
-
- Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
- ::tflite::TensorType_FLOAT32,
- { 1, 1, 1 }, // strideX, strideY, strideZ
- { 1, 1, 1 }, // dilationX, dilationY, dilationZ
- tflite::Padding_SAME,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-void Conv3DWithBiasesStridesFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
- std::vector<int32_t> filterShape { 3, 5, 5, 1, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 1, 3, 3, 1 };
-
- std::vector<float> inputValues = CreateFloatData<float>(300, 1.0f);
-
- std::vector<float> filterValues =
- {
- 1.f, 1.f, 1.f, 1.f, 1.f,
- 1.f, 1.f, 1.f, 1.f, 1.f,
- 1.f, 1.f, 1.f, 1.f, 1.f,
- 1.f, 1.f, 1.f, 1.f, 1.f,
- 1.f, 1.f, 1.f, 1.f, 1.f,
-
- 0.f, 0.f, 0.f, 0.f, 0.f,
- 0.f, 0.f, 0.f, 0.f, 0.f,
- 0.f, 0.f, 0.f, 0.f, 0.f,
- 0.f, 0.f, 0.f, 0.f, 0.f,
- 0.f, 0.f, 0.f, 0.f, 0.f,
-
- 2.f, 2.f, 2.f, 2.f, 2.f,
- 2.f, 2.f, 2.f, 2.f, 2.f,
- 2.f, 2.f, 2.f, 2.f, 2.f,
- 2.f, 2.f, 2.f, 2.f, 2.f,
- 2.f, 2.f, 2.f, 2.f, 2.f
- };
-
- std::vector<float> biasValues = { 10.f };
-
- std::vector<float> expectedOutputValues =
- {
- 11660.f, 11810.f, 11960.f,
-
- 13160.f, 13310.f, 13460.f,
-
- 14660.f, 14810.f, 14960.f
- };
-
- Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
- ::tflite::TensorType_FLOAT32,
- { 2, 2, 2 }, // strideX, strideY, strideZ
- { 1, 1, 1 }, // dilationX, dilationY, dilationZ
- tflite::Padding_VALID,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-
-void Conv3DWithBiasesDilationFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 5, 5, 5, 2 };
- std::vector<int32_t> filterShape { 2, 2, 2, 2, 2 };
- std::vector<int32_t> biasShape { 2 };
- std::vector<int32_t> outputShape { 1, 2, 2, 2, 2 };
-
- std::vector<float> inputValues = CreateFloatData<float>(250, 1.0f);
-
- std::vector<float> filterValues =
- {
- -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, -1.f, 1.f, 1.f, 1.f, -1.f, -1.f,
- 1.f, 1.f, -1.f, 1.f, -1.f, 1.f, -1.f, 1.f, -1.f, -1.f, -1.f, 1.f, -1.f, 1.f, -1.f, 1.f,
- };
-
- std::vector<float> biasValues = { 0.f, 2.f };
-
- // Since the dilation rate is 3, the 2x2x2 kernel is dilated to an effective 4x4x4,
- // so with VALID padding the 5x5x5 input produces a 2x2x2 output.
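- // Effective kernel size per spatial dimension: (2 - 1) * 3 + 1 = 4.
- // Output size per spatial dimension with VALID padding: 5 - 4 + 1 = 2.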
- std::vector<float> expectedOutputValues =
- {
- -1124.f, 976.f,
- -1148.f, 980.f,
-
- -1244.f, 996.f,
- -1268.f, 1000.f,
-
- -1724.f, 1076.f,
- -1748.f, 1080.f,
-
- -1844.f, 1096.f,
- -1868.f, 1100.f
- };
-
- Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
- ::tflite::TensorType_FLOAT32,
- { 1, 1, 1 }, // strideX, strideY, strideZ
- { 3, 3, 3 }, // dilationX, dilationY, dilationZ
- tflite::Padding_VALID,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-void Conv3DFp32SmallTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
- std::vector<int32_t> filterShape { 3, 3, 3, 1, 1 };
- std::vector<int32_t> biasShape { 1 };
- std::vector<int32_t> outputShape { 1, 1, 4, 4, 1 };
-
- std::vector<float> inputValues = CreateFloatData<float>(300, 100.0f);
-
- std::vector<float> filterValues =
- {
- 0.125977f, 0.150391f, 0.101562f,
- 0.0585938f, 0.0864258f, 0.043457f,
- 0.034668f, 0.0322266f, 0.0385742f,
-
- 0.125977f, 0.150391f, -0.101562f,
- -0.0585938f,-0.0864258f,-0.043457f,
- -0.0104630f, 0.0154114f, 0.0013768f,
-
- 0.0344238f, 0.035644f, 0.0495605f,
- 0.0683594f, 0.099121f, -0.0461426f,
- -0.0996094f,-0.126953f, -0.043457f,
- };
-
- std::vector<float> biasValues = { 0 };
-
- std::vector<float> expectedOutputValues =
- {
- -0.08156067f, -0.06891209f, -0.05589598f, -0.04310101f,
- 0.04584253f, 0.05855697f, 0.07129729f, 0.08325434f,
- 0.17304349f, 0.18521416f, 0.19818866f, 0.21096253f,
- 0.29965734f, 0.312698f, 0.32547557f, 0.33818722f
- };
-
- Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
- ::tflite::TensorType_FLOAT32,
- { 2, 2, 2 }, // strideX, strideY, strideZ
- { 1, 1, 1 }, // dilationX, dilationY, dilationZ
- tflite::Padding_VALID,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-TEST_SUITE("Convolution3dTest_CpuRefTests")
-{
-
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv3DWithBiasesStridesFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesDilation_Fp32_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv3DWithBiasesDilationFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DFp32Small_Fp32_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- Conv3DFp32SmallTest(backends);
-}
-
-} //End of TEST_SUITE("Convolution3dTest_CpuRefTests")
-
-TEST_SUITE("Convolution3dTest_CpuAccTests")
-{
-
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv3DWithBiasesStridesFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DFp32Small_Fp32_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- Conv3DFp32SmallTest(backends);
-}
-
-} //End of TEST_SUITE("Convolution3dTest_CpuAccTests")
-
-TEST_SUITE("Convolution3dTest_GpuAccTests")
-{
-
-TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DWithBiasesStrides_Fp32_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv3DWithBiasesStridesFp32Test(backends);
-}
-
-TEST_CASE ("Conv3DFp32Small_Fp32_GpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- Conv3DFp32SmallTest(backends);
-}
-
-} //End of TEST_SUITE("Convolution3dTest_GpuAccTests")
-
-#endif
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ConvolutionTestHelper.hpp b/delegate/src/test/ConvolutionTestHelper.hpp
deleted file mode 100644
index 70c1da6dce..0000000000
--- a/delegate/src/test/ConvolutionTestHelper.hpp
+++ /dev/null
@@ -1,784 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T, typename B = float>
-std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
- tflite::TensorType tensorType,
- uint32_t strideX,
- uint32_t strideY,
- uint32_t dilationX,
- uint32_t dilationY,
- tflite::Padding padding,
- tflite::ActivationFunctionType fused_activation_function,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& filterTensorShape,
- const std::vector <int32_t>& biasTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const std::vector <T>& filterData,
- const std::vector <B>& biasData,
- const std::vector<float> biasScales = {1.0f},
- const std::vector<int64_t> biasOffsets = {0},
- const std::vector<float> filterScales = {1.0f},
- const std::vector<int64_t> filterOffsets = {0},
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0,
- int32_t depth_multiplier = 1,
- int32_t filterQuantizationDim = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
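- // Buffer 0 is left empty (the TFLite convention for "no data"); buffers 2 and 3 hold the
- // constant filter and bias data referenced by the filter and bias tensors below.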
- std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder);
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
- sizeof(T) * filterData.size()));
-
- buffers[3] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
- sizeof(B) * biasData.size()));
- buffers[4] = CreateBuffer(flatBufferBuilder);
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
- auto outputQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
-
- auto filterQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>(filterScales),
- flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
- tflite::QuantizationDetails_NONE,
- 0,
- filterQuantizationDim);
-
- auto biasQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>(biasScales),
- flatBufferBuilder.CreateVector<int64_t>(biasOffsets));
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
- filterTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("filter"),
- filterQuantizationParameters);
-
- auto biasTensorType = ::tflite::TensorType_FLOAT32;
- if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
- {
- biasTensorType = ::tflite::TensorType_INT32;
- }
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
- biasTensorType,
- 3,
- flatBufferBuilder.CreateString("bias"),
- biasQuantizationParameters);
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 4,
- flatBufferBuilder.CreateString("output"),
- outputQuantizationParameters);
-
- flatbuffers::Offset<void> operatorBuiltinOptions;
- tflite::BuiltinOptions operatorBuiltinOptionsType;
-
- if(convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
- {
- operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
- operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
- padding,
- strideX,
- strideY,
- depth_multiplier,
- fused_activation_function,
- dilationX,
- dilationY).Union();
- }
- if(convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
- {
- operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
- operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
- padding,
- strideX,
- strideY,
- fused_activation_function,
- dilationX,
- dilationY).Union();
- }
-
- // create operator
- const std::vector<int> operatorInputs{0, 1, 2};
- const std::vector<int> operatorOutputs{3};
- flatbuffers::Offset <Operator> convolutionOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1, 2};
- const std::vector<int> subgraphOutputs{3};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&convolutionOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T, typename B = float>
-void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
- tflite::TensorType tensorType,
- uint32_t strideX,
- uint32_t strideY,
- uint32_t dilationX,
- uint32_t dilationY,
- tflite::Padding padding,
- tflite::ActivationFunctionType fused_activation_function,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& filterShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& filterValues,
- std::vector<T>& expectedOutputValues,
- const std::vector<int32_t>& biasShape = {},
- const std::vector<B>& biasValues = {},
- const std::vector<float> biasScales = {1.0f},
- const std::vector<int64_t> biasOffsets = {0},
- const std::vector<float> filterScales = {1.0f},
- const std::vector<int64_t> filterOffsets = {0},
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0,
- int32_t depth_multiplier = 1,
- int32_t filterQuantizationDim = 3)
-
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer;
-
- modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
- tensorType,
- strideX,
- strideY,
- dilationX,
- dilationY,
- padding,
- fused_activation_function,
- inputShape,
- filterShape,
- biasShape,
- outputShape,
- filterValues,
- biasValues,
- biasScales,
- biasOffsets,
- filterScales,
- filterOffsets,
- outputQuantScale,
- outputQuantOffset,
- quantScale,
- quantOffset,
- depth_multiplier,
- filterQuantizationDim);
-
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
- CHECK(doctest::Approx(tfLiteDelagateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
- CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
- }
-}
-
-// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
-#if defined(ARMNN_POST_TFLITE_2_5)
-template <typename T, typename B = float>
-std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
- tflite::TensorType tensorType,
- std::vector<uint32_t> strides,
- std::vector<uint32_t> dilation,
- tflite::Padding padding,
- tflite::ActivationFunctionType fused_activation_function,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& filterTensorShape,
- const std::vector<int32_t>& biasTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const std::vector<T>& filterData,
- const std::vector<B>& biasData,
- const std::vector<float> biasScales = {1.0f},
- const std::vector<int64_t> biasOffsets = {0},
- const std::vector<float> filterScales = {1.0f},
- const std::vector<int64_t> filterOffsets = {0},
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0,
- int32_t depth_multiplier = 1,
- int32_t filterQuantizationDim = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
- sizeof(T) * filterData.size()));
-
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
- sizeof(B) * biasData.size()));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
- auto outputQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
-
- auto filterQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>(filterScales),
- flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
- tflite::QuantizationDetails_NONE,
- 0,
- filterQuantizationDim);
-
- auto biasQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>(biasScales),
- flatBufferBuilder.CreateVector<int64_t>(biasOffsets));
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
- filterTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("filter"),
- filterQuantizationParameters);
-
- auto biasTensorType = ::tflite::TensorType_FLOAT32;
- if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
- {
- biasTensorType = ::tflite::TensorType_INT32;
- }
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
- biasTensorType,
- 2,
- flatBufferBuilder.CreateString("bias"),
- biasQuantizationParameters);
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- outputQuantizationParameters);
-
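- // The strides and dilation vectors are ordered { X (width), Y (height), Z (depth) }, while
- // Conv3DOptions expects depth, width and height, hence the index reordering below.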
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv3DOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateConv3DOptions(flatBufferBuilder,
- padding,
- strides[2], // Depth
- strides[0], // Width
- strides[1], // Height
- fused_activation_function,
- dilation[2],
- dilation[0],
- dilation[1]).Union();
-
- // Create operator
- const std::vector<int> operatorInputs{0, 1, 2};
- const std::vector<int> operatorOutputs{3};
- flatbuffers::Offset <Operator> convolutionOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1, 2};
- const std::vector<int> subgraphOutputs{3};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&convolutionOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Convolution 3d Operator Model");
-
- // If using an operator with a code greater than 127, the enum value should be passed as the fifth
- // parameter rather than the second, as is done in the other tests.
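- // The second parameter (deprecated_builtin_code) is only a single byte, which is why larger
- // operator codes have to be passed via the later builtin_code parameter instead.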
- flatbuffers::Offset <OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, 0, 0, 1, tflite::BuiltinOperator_CONV_3D);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T, typename B = float>
-void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
- tflite::TensorType tensorType,
- std::vector<uint32_t> strides,
- std::vector<uint32_t> dilation,
- tflite::Padding padding,
- tflite::ActivationFunctionType fused_activation_function,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& filterShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& filterValues,
- std::vector<T>& expectedOutputValues,
- const std::vector<int32_t>& biasShape = {},
- const std::vector<B>& biasValues = {},
- const std::vector<float> biasScales = {1.0f},
- const std::vector<int64_t> biasOffsets = {0},
- const std::vector<float> filterScales = {1.0f},
- const std::vector<int64_t> filterOffsets = {0},
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0,
- int32_t depth_multiplier = 1,
- int32_t filterQuantizationDim = 3)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer;
- modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
- tensorType,
- strides,
- dilation,
- padding,
- fused_activation_function,
- inputShape,
- filterShape,
- biasShape,
- outputShape,
- filterValues,
- biasValues,
- biasScales,
- biasOffsets,
- filterScales,
- filterOffsets,
- outputQuantScale,
- outputQuantOffset,
- quantScale,
- quantOffset,
- depth_multiplier,
- filterQuantizationDim);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1);
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size(), 1);
- armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1);
-}
-#endif
-
-template <typename T>
-std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
- uint32_t strideX,
- uint32_t strideY,
- tflite::Padding padding,
- const std::vector <int32_t>& transposeTensorShape,
- const std::vector <int32_t>& filterTensorShape,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const std::vector <int32_t>& transposeData,
- const std::vector <T>& filterData,
- float filterScale = 1.0f,
- int filterOffset = 0,
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
- sizeof(int32_t) * transposeData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
- sizeof(T) * filterData.size()));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
- auto outputQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
- auto filterQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ filterScale }),
- flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
- transposeTensorShape.size()),
- tflite::TensorType_INT32,
- 1);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
- filterTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("filter"),
- filterQuantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- outputQuantizationParameters);
-
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();
-
- // create operator
- const std::vector<int> operatorInputs{0, 1, 2};
- const std::vector<int> operatorOutputs{3};
- flatbuffers::Offset <Operator> convolutionOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1, 2};
- const std::vector<int> subgraphOutputs{3};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&convolutionOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void TransposeConvTest(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
- uint32_t strideX,
- uint32_t strideY,
- tflite::Padding padding,
- const std::vector <int32_t>& transposeTensorShape,
- const std::vector <int32_t>& filterTensorShape,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const std::vector <int32_t>& transposeData,
- const std::vector <T>& filterData,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- float filterScale = 1.0f,
- int filterOffset = 0,
- float outputQuantScale = 1.0f,
- int outputQuantOffset = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer;
- modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
- strideX,
- strideY,
- padding,
- transposeTensorShape,
- filterTensorShape,
- inputTensorShape,
- outputTensorShape,
- transposeData,
- filterData,
- filterScale,
- filterOffset,
- outputQuantScale,
- outputQuantOffset,
- quantScale,
- quantOffset);
-
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
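- // For TRANSPOSE_CONV the subgraph inputs are { output shape, filter, activation }, so the
- // runtime data is written to input index 2; inputs 0 and 1 are backed by constant buffers.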
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelagateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
- }
-}
-
-} // anonymous namespace
-
-
-
-
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
deleted file mode 100644
index 98323131f9..0000000000
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ /dev/null
@@ -1,372 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "DelegateOptionsTestHelper.hpp"
-#include <common/include/ProfilingGuid.hpp>
-#include <armnnUtils/Filesystem.hpp>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("DelegateOptions")
-{
-
-TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
-{
- std::stringstream ss;
- {
- StreamRedirector redirect(std::cout, ss.rdbuf());
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- // Enable ReduceFp32ToFp16
- armnn::OptimizerOptions optimizerOptions(true, true, false, false);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
-
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
- }
- // ReduceFp32ToFp16 option is enabled
- CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
-}
-
-TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
-{
- std::stringstream ss;
- {
- StreamRedirector redirect(std::cout, ss.rdbuf());
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- // Enable Debug
- armnn::OptimizerOptions optimizerOptions(false, true, false, false);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
-
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
- }
- // Debug option triggered.
- CHECK(ss.str().find("layerGuid") != std::string::npos);
- CHECK(ss.str().find("layerName") != std::string::npos);
- CHECK(ss.str().find("outputSlot") != std::string::npos);
- CHECK(ss.str().find("shape") != std::string::npos);
- CHECK(ss.str().find("data") != std::string::npos);
-}
-
-TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- // Enable debug with debug callback function
- armnn::OptimizerOptions optimizerOptions(false, true, false, false);
- bool callback = false;
- auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
- {
- armnn::IgnoreUnused(guid);
- armnn::IgnoreUnused(slotIndex);
- armnn::IgnoreUnused(tensor);
- callback = true;
- };
-
- armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
- armnnDelegate::DelegateOptions delegateOptions(backends,
- optimizerOptions,
- armnn::EmptyOptional(),
- armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
-
- CHECK(!callback);
-
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
-
- // Check that the debug callback function was called.
- CHECK(callback);
-}
-
-TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
- std::vector<uint8_t> divData = { 2, 2, 3, 4 };
- std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
-
- armnn::OptimizerOptions optimizerOptions(false, false, false, true);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
-
- DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
-}
-
-TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
-{
- std::stringstream stringStream;
- std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
- std::vector<std::string> values { "CpuRef", "1", "1"};
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
- std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
-
- // Create options_keys and options_values char array
- size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- for (size_t i=0; i<num_options; ++i)
- {
- options_keys.get()[i] = keys[i].c_str();
- options_values.get()[i] = values[i].c_str();
- }
-
- StreamRedirector redirect(std::cout, stringStream.rdbuf());
-
- armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
- DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- expectedResult,
- delegateOptions);
- CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
- != std::string::npos);
-}
-
-TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
-{
- std::stringstream stringStream;
- std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
- std::vector<std::string> values { "CpuRef", "1", "0"};
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
- std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
-
- // Create options_keys and options_values char array
- size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- for (size_t i=0; i<num_options; ++i)
- {
- options_keys.get()[i] = keys[i].c_str();
- options_values.get()[i] = values[i].c_str();
- }
-
- StreamRedirector redirect(std::cout, stringStream.rdbuf());
-
- armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
- DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- expectedResult,
- delegateOptions);
-
- CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
- == std::string::npos);
-}
-
-}
-
-TEST_SUITE("DelegateOptions_CpuAccTests")
-{
-
-TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- unsigned int numberOfThreads = 2;
-
- armnn::ModelOptions modelOptions;
- armnn::BackendOptions cpuAcc("CpuAcc",
- {
- { "FastMathEnabled", true },
- { "NumberOfThreads", numberOfThreads }
- });
- modelOptions.push_back(cpuAcc);
-
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
-
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
-}
-
-TEST_CASE ("ArmnnDelegateSerializeToDot")
-{
- const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
- if ( fs::exists(filename) )
- {
- fs::remove(filename);
- }
- std::stringstream ss;
- {
- StreamRedirector redirect(std::cout, ss.rdbuf());
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- armnn::OptimizerOptions optimizerOptions(false, false, false, false);
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
- // Enable serialize to dot by specifying the target file name.
- delegateOptions.SetSerializeToDot(filename);
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
- }
- CHECK(fs::exists(filename));
- // The file should have a size greater than 0 bytes.
- CHECK(fs::file_size(filename) > 0);
- // Clean up.
- fs::remove(filename);
-}
-
-void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
- std::vector<std::string>& values,
- std::stringstream& ss)
-{
- StreamRedirector redirect(std::cout, ss.rdbuf());
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
- std::vector<float> inputData = { 1, 2, 3, 4 };
- std::vector<float> divData = { 2, 2, 3, 4 };
- std::vector<float> expectedResult = { 1, 2, 2, 2 };
-
- // Create options_keys and options_values char array
- size_t num_options = keys.size();
- std::unique_ptr<const char*> options_keys =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- std::unique_ptr<const char*> options_values =
- std::unique_ptr<const char*>(new const char*[num_options + 1]);
- for (size_t i=0; i<num_options; ++i)
- {
- options_keys.get()[i] = keys[i].c_str();
- options_values.get()[i] = values[i].c_str();
- }
-
- armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
- DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- tensorShape,
- inputData,
- inputData,
- divData,
- expectedResult,
- delegateOptions);
-}
-
-TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
-{
- SUBCASE("Fp16=1")
- {
- std::stringstream ss;
- std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
- std::vector<std::string> values { "CpuRef", "1", "1", "info"};
- CreateFp16StringParsingTestRun(keys, values, ss);
- CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
- }
- SUBCASE("Fp16=true")
- {
- std::stringstream ss;
- std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
- std::vector<std::string> values { "CpuRef", "TRUE", "true"};
- CreateFp16StringParsingTestRun(keys, values, ss);
- CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
- }
- SUBCASE("Fp16=True")
- {
- std::stringstream ss;
- std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
- std::vector<std::string> values { "CpuRef", "true", "True"};
- CreateFp16StringParsingTestRun(keys, values, ss);
- CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
- }
- SUBCASE("Fp16=0")
- {
- std::stringstream ss;
- std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
- std::vector<std::string> values { "CpuRef", "true", "0"};
- CreateFp16StringParsingTestRun(keys, values, ss);
- CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
- }
- SUBCASE("Fp16=false")
- {
- std::stringstream ss;
- std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
- std::vector<std::string> values { "CpuRef", "1", "false"};
- CreateFp16StringParsingTestRun(keys, values, ss);
- CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
- CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
- }
-}
-
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
deleted file mode 100644
index 00a3d95904..0000000000
--- a/delegate/src/test/DelegateOptionsTestHelper.hpp
+++ /dev/null
@@ -1,344 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-
-#include "ConvolutionTestHelper.hpp"
-#include "TestUtils.hpp"
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-struct StreamRedirector
-{
-public:
- StreamRedirector(std::ostream &stream, std::streambuf *newStreamBuffer)
- : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}
-
- ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
-
-private:
- std::ostream &m_Stream;
- std::streambuf *m_BackupBuffer;
-};
-
-std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
- const std::vector<int32_t>& tensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
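- // Builds a two-operator subgraph: add = input_0 + input_1, then output = add / input_2,
- // with all tensors sharing the same shape and quantization parameters.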
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
-
- std::array<flatbuffers::Offset<Tensor>, 5> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("input_1"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("input_2"),
- quantizationParameters);
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 4,
- flatBufferBuilder.CreateString("add"),
- quantizationParameters);
- tensors[4] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 5,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
- flatbuffers::Offset<void> addBuiltinOptions =
- CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
-
- tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
- flatbuffers::Offset<void> divBuiltinOptions =
- CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
-
- std::array<flatbuffers::Offset<Operator>, 2> operators;
- const std::vector<int32_t> addInputs{0, 1};
- const std::vector<int32_t> addOutputs{3};
- operators[0] = CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
- addBuiltinOptionsType,
- addBuiltinOptions);
- const std::vector<int32_t> divInputs{3, 2};
- const std::vector<int32_t> divOutputs{4};
- operators[1] = CreateOperator(flatBufferBuilder,
- 1,
- flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
- divBuiltinOptionsType,
- divBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1, 2};
- const std::vector<int> subgraphOutputs{4};
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(operators.data(), operators.size()));
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");
-
- std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
- codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
- codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(codes.data(), codes.size()),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- const std::vector<int32_t> operatorInputs({0});
- const std::vector<int32_t> operatorOutputs({1});
-
- flatbuffers::Offset<Operator> ceilOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_NONE);
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: CEIL Operator Model");
- flatbuffers::Offset<OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CEIL);
-
- const std::vector<int32_t> subgraphInputs({0});
- const std::vector<int32_t> subgraphOutputs({1});
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&ceilOperator, 1));
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
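-// Runs the Add/Div model on a plain TfLite interpreter and on an interpreter using the
-// ArmNN delegate created from the supplied DelegateOptions, then compares the outputs
-// of both runs.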
-template <typename T>
-void DelegateOptionTest(tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& tensorShape,
- std::vector<T>& input0Values,
- std::vector<T>& input1Values,
- std::vector<T>& input2Values,
- std::vector<T>& expectedOutputValues,
- const armnnDelegate::DelegateOptions& delegateOptions,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
- tensorShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
-
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
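-// Variant of DelegateOptionTest that uses the single CEIL operator model. With fallback
-// disabled in the DelegateOptions, ModifyGraphWithDelegate may throw an armnn::Exception;
-// the exception is caught and its message printed instead of failing the test.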
-template <typename T>
-void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& tensorShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- const armnnDelegate::DelegateOptions& delegateOptions,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
- tensorShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- try
- {
- armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
- }
- catch (const armnn::Exception& e)
- {
- // Forward the exception message to std::cout
- std::cout << e.what() << std::endl;
- }
-
- // Set input data
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/DepthwiseConvolution2dTest.cpp b/delegate/src/test/DepthwiseConvolution2dTest.cpp
deleted file mode 100644
index ca10f2c0cb..0000000000
--- a/delegate/src/test/DepthwiseConvolution2dTest.cpp
+++ /dev/null
@@ -1,282 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvolutionTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void DepthwiseConv2dValidReluFp32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 2, 2 };
- std::vector<int32_t> filterShape { 1, 2, 2, 4 };
- std::vector<int32_t> biasShape { 4 };
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- static std::vector<float> inputValues =
- {
- 1, 2, 7, 8,
- 3, 4, 9, 10,
- 5, 6, 11, 12
- };
-
- std::vector<float> filterValues =
- {
- 1, 2, 3, 4,
- -9, 10, -11, 12,
- 5, 6, 7, 8,
- 13, -14, 15, -16
- };
-
- std::vector<float> biasValues = { 1, 2, 3, 4 };
-
- std::vector<float> expectedOutputValues =
- {
- 71, 0, 99, 0,
- 91, 0, 127, 0
- };
-
- tflite::Padding padding = tflite::Padding_VALID;
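- // A depth_multiplier of 2 expands the 2 input channels into the 4 output channels
- // described by the filter and bias shapes above.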
- int32_t depth_multiplier = 2;
-
- ConvolutionTest<float>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- ::tflite::TensorType_FLOAT32,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_RELU,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- {1.0f}, // biasScale
- {0}, // biasOffset
- {1.0f}, // filterScale
- {0}, // filterOffsets
- 2.0f, // outputQuantScale
- 0, // outputQuantOffset
- 1.0f, // quantScale
- 0, // quantOffset
- depth_multiplier);
-}
-
-void DepthwiseConv2dSameUint8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 3, 1 };
- std::vector<int32_t> filterShape { 1, 3, 3, 1 };
- std::vector<int32_t> biasShape { 1 } ;
- std::vector<int32_t> outputShape { 1, 3, 3, 1 };
-
- static std::vector<uint8_t> inputValues =
- {
- 0, 1, 2,
- 3, 4, 5,
- 6, 7, 8
- };
-
- std::vector<uint8_t> filterValues = { 9, 8, 7, 6, 5, 4, 3, 2, 1 };
-
- std::vector<int32_t> biasValues = { 10 };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 12, 23, 24, // ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
- 34, 65, 61, // ( 57+10)/2, (120+10)/2, (111+10)/2,
- 60, 104, 84 // (110+10)/2, (197+10)/2, (158+10)/2
- };
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- ::tflite::TensorType_UINT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues);
-}
-
-void DepthwiseConv2dSameInt8PerChannelTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 4, 4, 4 };
- std::vector<int32_t> filterShape { 1, 2, 2, 16 };
- std::vector<int32_t> biasShape {16} ;
- std::vector<int32_t> outputShape { 1, 4, 4, 16 };
-
- static std::vector<int8_t> inputValues =
- {
- 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
- 3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
- 3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
- 4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2
- };
-
- std::vector<int8_t> filterValues = { 12,20,10, 3, 2,24, 9,10, 5,16,30,12, 3,10, 4,32,
- 8, 0,30, 3, 0,16,12,15,20,12, 0, 3, 9,20, 8, 8,
- 12,15,20, 0, 0, 0, 3,15,15, 8,40,12, 9, 5, 2,24,
- 4, 0, 0, 6, 6, 0, 3, 5,20, 8,20, 3, 6,15, 4, 0 };
- std::vector<float> filterScales = { 0.25, 0.2, 0.1, 0.3333333333,
- 0.5, 0.125, 0.33333333, 0.2,
- 0.2, 0.25, 0.1, 0.333333333,
- 0.3333333333, 0.2, 0.5, 0.125 };
-
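- // Per-channel quantization: one filter scale per output channel, quantized along
- // dimension 3 of the 1x2x2x16 filter.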
- int32_t filterQuantizationDim = 3;
-
- int32_t depth_multiplier = 4;
-
- std::vector<int32_t> biasValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
- float inputScale = 1.0f;
- std::vector<float> biasScales {};
- std::vector<int64_t> biasOffsets {};
- std::vector<int64_t> filterOffsets {};
- for (const auto& filterScale: filterScales)
- {
- biasScales.push_back(inputScale * filterScale);
- // Filter and bias offsets always need to be zero for per-channel quantization; nothing else is supported.
- biasOffsets.push_back(0);
- filterOffsets.push_back(0);
- }
-
- std::vector<int8_t> expectedOutputValues =
- {
- 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
- 16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
- 12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
- 0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
- 20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
- 18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
- 27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
- 9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
- 26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
- 20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
- 28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
- 12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
- 14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
- 9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
- 11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
- 3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8
- };
-
- tflite::Padding padding = tflite::Padding_SAME;
-
- ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- ::tflite::TensorType_INT8,
- 1, // strideX
- 1, // strideY
- 1, // dilationX
- 1, // dilationY
- padding,
- tflite::ActivationFunctionType_NONE,
- backends,
- inputShape,
- filterShape,
- outputShape,
- inputValues,
- filterValues,
- expectedOutputValues,
- biasShape,
- biasValues,
- biasScales,
- biasOffsets,
- filterScales,
- filterOffsets,
- 1.0f,
- 0,
- inputScale,
- 0,
- depth_multiplier,
- filterQuantizationDim);
-}
-
-TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
-{
-
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- DepthwiseConv2dValidReluFp32Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- DepthwiseConv2dSameUint8Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Int8_PerChannelQuantization_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- DepthwiseConv2dSameInt8PerChannelTest(backends);
-}
-
-}//End of TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
-
-TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
-{
-
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- DepthwiseConv2dValidReluFp32Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- DepthwiseConv2dSameUint8Test(backends);
-}
-
-}//End of TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
-
-TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
-{
-
-TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- DepthwiseConv2dValidReluFp32Test(backends);
-}
-
-TEST_CASE ("DepthwiseConv2d_Same_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- DepthwiseConv2dSameUint8Test(backends);
-}
-
-}//End of TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
deleted file mode 100644
index 8099efebff..0000000000
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ /dev/null
@@ -1,1136 +0,0 @@
-//
-// Copyright © 2020-2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ElementwiseBinaryTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void AddFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 3 };
-
- std::vector<float> input0Values =
- {
- 0.0f, 2.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
-
- 1.0f, 2.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
-
- 0.0f, 2.0f, 1.0f,
- 4.2f, 1.0f, 2.0f,
-
- 0.0f, 0.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
- };
-
- std::vector<float> input1Values =
- {
- 1.0f, 2.0f, 1.0f,
- 0.0f, 1.0f, 2.0f,
-
- 1.0f, 2.0f, -2.0f,
- 0.2f, 1.0f, 2.0f,
-
- 0.0f, 2.0f, 1.0f,
- 4.2f, 0.0f, -3.0f,
-
- 0.0f, 0.0f, 1.0f,
- 0.7f, 1.0f, 5.0f,
- };
-
- std::vector<float> expectedOutputValues =
- {
- 1.0f, 4.0f, 2.0f,
- 0.2f, 2.0f, 4.0f,
-
- 2.0f, 4.0f, -1.0f,
- 0.4f, 2.0f, 4.0f,
-
- 0.0f, 4.0f, 2.0f,
- 8.4f, 1.0f, -1.0f,
-
- 0.0f, 0.0f, 2.0f,
- 0.9f, 2.0f, 7.0f,
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 1, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 3, 2, 3 };
-
- std::vector<float> input0Values
- {
- 0.0f,
- 1.0f,
-
- 2.0f,
- 3.0f,
-
- 4.0f,
- 5.0f,
- };
- std::vector<float> input1Values
- {
- 0.5f, 1.5f, 2.5f,
- 3.5f, 4.5f, 5.5f,
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.5f, 1.5f, 2.5f,
- 4.5f, 5.5f, 6.5f,
-
- 2.5f, 3.5f, 4.5f,
- 6.5f, 7.5f, 8.5f,
-
- 4.5f, 5.5f, 6.5f,
- 8.5f, 9.5f, 10.5f,
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void AddConstInputTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 3, 2, 1 };
-
- std::vector<float> input0Values
- {
- 0.0f,
- 1.0f,
-
- 2.0f,
- 3.0f,
-
- 4.0f,
- 5.0f,
- };
- std::vector<float> input1Values
- {
- 0.5f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.5f,
- 1.5f,
-
- 2.5f,
- 3.5f,
-
- 4.5f,
- 5.5f,
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- 1.0f,
- 0,
- true);
-}
-
-void AddActivationTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values { 4.0f, 0.8f, 0.7f, -0.8f };
- std::vector<float> input1Values { 0.7f, -1.2f, 0.8f, 0.5f };
- std::vector<float> expectedOutputValues { 4.7f, 0.0f, 1.5f, 0.0f };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
- tflite::ActivationFunctionType_RELU,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void AddUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<uint8_t> input0Values =
- {
- 63, 35, 77, 70, 56, 112,
- 203, 28, 252, 168, 245, 91
- };
-
- std::vector<uint8_t> input1Values =
- {
- 21, 7, 175, 231, 175, 210,
- 126, 161, 63, 21, 105, 126
- };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 81, 39, 249, 255, 228, 255,
- 255, 186, 255, 186, 255, 214,
- };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_ADD,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 7.0f, 3);
-}
-
-void DivFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
- 4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
-
- };
-
- std::vector<float> input1Values =
- {
- 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
- 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 2.f, 2.f, 2.f, 2.f, 1.50f, 1.50f, 1.50f, 1.50f,
- 1.f, 1.f, 1.f, 1.f, 1.25f, 1.25f, 1.25f, 1.25f
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void DivBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
-
- std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
- std::vector<float> input1Values = { 2 };
- std::vector<float> expectedOutputValues = { 1, 2, 3, 4, 5, 6, 7, 8 };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void DivUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<uint8_t> input0Values =
- {
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5
-
- };
-
- std::vector<uint8_t> input1Values =
- {
- 1, 1, 1, 1, 2, 2, 2, 2,
- 4, 4, 4, 4, 4, 4, 4, 4
- };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 8, 8, 8, 8, 6, 6, 6, 6,
- 4, 4, 4, 4, 5, 5, 5, 5
- };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_DIV,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 0.25f, 0);
-}
-
-void FloorDivFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- -37.5f, -15.2f, -8.76f, -2.0f, -2.6f, -1.0f, -0.8f, 0.0f,
- 4.0f, 1.6f, 2.0f, 5.2f, 6.0f, 35.04f, 60.8f, 150.0f
- };
-
- std::vector<float> input1Values =
- {
- 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
- 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
- };
-
- std::vector<float> expectedOutputValues =
- {
- -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
- 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 8.0f, 15.0f, 37.0f
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_FLOOR_DIV,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-
-}
-
-void MaxFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 1.f, 1.f, 5.f, 1.f, 2.f, 2.f, 7.f, 2.f,
- 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
-
- };
-
- std::vector<float> input1Values =
- {
- 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
- 4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 2.f, 2.f, 5.f, 2.f, 3.f, 3.f, 7.f, 3.f,
- 4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MaxBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
-
- std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
- std::vector<float> input1Values = { 4.f };
- std::vector<float> expectedOutputValues = { 4.f, 4.f, 4.f, 4.f, 5.f, 6.f, 7.f, 8.f };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MaxUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<uint8_t> input0Values =
- {
- 1, 1, 1, 1, 7, 8, 9, 9,
- 3, 3, 3, 3, 4, 4, 4, 4
-
- };
-
- std::vector<uint8_t> input1Values =
- {
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5
- };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 2, 2, 2, 2, 7, 8, 9, 9,
- 4, 4, 4, 4, 5, 5, 5, 5
- };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MAXIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 1.0f, 0);
-}
-
-void MinFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 1.f, 1.f, 5.f, 1.f, 2.f, 2.f, 7.f, 2.f,
- 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
-
- };
-
- std::vector<float> input1Values =
- {
- 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
- 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 1.f, 1.f, 2.f, 1.f, 2.f, 2.f, 3.f, 2.f,
- 1.f, 1.f, 1.f, 1.f, 4.f, 4.f, 4.f, 4.f
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MinBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
-
- std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
-
- std::vector<float> input1Values = { 4.f };
-
- std::vector<float> expectedOutputValues = { 1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f, 4.f };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MinUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<uint8_t> input0Values =
- {
- 1, 1, 1, 1, 7, 8, 9, 9,
- 3, 3, 3, 3, 4, 4, 4, 4
-
- };
-
- std::vector<uint8_t> input1Values =
- {
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5
- };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 1, 1, 1, 1, 3, 3, 3, 3,
- 3, 3, 3, 3, 4, 4, 4, 4
- };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MINIMUM,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 1.0f, 0);
-}
-
-void MulFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
-
- std::vector<float> input0Values =
- {
- 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
- 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
-
- };
-
- std::vector<float> input1Values =
- {
- 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
- 4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 2.f, 2.f, 2.f, 2.f, 6.f, 6.f, 6.f, 6.f,
- 12.f, 12.f, 12.f, 12.f, 20.f, 20.f, 20.f, 20.f
- };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MulBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
-
- std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
- std::vector<float> input1Values = { 2 };
- std::vector<float> expectedOutputValues = { 4, 8, 12, 16, 20, 24, 28, 32 };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void MulUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
-
- std::vector<uint8_t> input0Values =
- {
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
-
- };
-
- std::vector<uint8_t> input1Values = { 1, 2, 3 };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 1, 4, 9, 4, 10, 18,
- 7, 16, 27, 10, 22, 36
- };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MUL,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 1.0f, 0);
-}
-
-void MulActivationTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
- std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
-
- std::vector<float> input0Values { 4.0f, 0.0f, 1.0f, 0.5f };
- std::vector<float> input1Values { -2.0f, -1.2f, 2.5f, 2.0f };
- std::vector<float> expectedOutputValues { 0.0f, 0.0f, 2.5f, 1.0f };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
- tflite::ActivationFunctionType_RELU,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void SubFP32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
-
- std::vector<float> input0Values = { 1, 3, 3, -7 };
- std::vector<float> input1Values = { 1, -1, 0, -2 };
- std::vector<float> expectedOutputValues = { 0, 4, 3, -5 };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void SubBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
-
- std::vector<float> input0Values = { 2, 3, 4, 5};
- std::vector<float> input1Values = { 10 };
- std::vector<float> expectedOutputValues = { -8, -7, -6, -5 };
-
- ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void SubUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
-
- std::vector<uint8_t> input0Values = { 10, 12, 14, 16 };
- std::vector<uint8_t> input1Values = { 2 };
- std::vector<uint8_t> expectedOutputValues = { 8, 10, 12, 14 };
-
- ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_SUB,
- tflite::ActivationFunctionType_NONE,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues, 1.0f, 0);
-}
-
-TEST_SUITE("ElementwiseBinary_GpuAccTests")
-{
-
-TEST_CASE ("ADD_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AddFP32Test(backends);
-}
-
-TEST_CASE ("ADD_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AddBroadcastTest(backends);
-}
-
-TEST_CASE ("ADD_Activation_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AddActivationTest(backends);
-}
-
-TEST_CASE ("ADD_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AddUint8Test(backends);
-}
-
-TEST_CASE ("DIV_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DivBroadcastTest(backends);
-}
-
-TEST_CASE ("FLOORDIV_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FloorDivFP32Test(backends);
-}
-
-TEST_CASE ("MAX_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxFP32Test(backends);
-}
-
-TEST_CASE ("MAX_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxBroadcastTest(backends);
-}
-
-TEST_CASE ("MAX_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxUint8Test(backends);
-}
-
-TEST_CASE ("MIN_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MinFP32Test(backends);
-}
-
-TEST_CASE ("MIN_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MinBroadcastTest(backends);
-}
-
-TEST_CASE ("MIN_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MinUint8Test(backends);
-}
-
-TEST_CASE ("MUL_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MulFP32Test(backends);
-}
-
-TEST_CASE ("MUL_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MulBroadcastTest(backends);
-}
-
-TEST_CASE ("MUL_Activation_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MulActivationTest(backends);
-}
-
-TEST_CASE ("MUL_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MulUint8Test(backends);
-}
-
-TEST_CASE ("SUB_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SubFP32Test(backends);
-}
-
-TEST_CASE ("SUB_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SubBroadcastTest(backends);
-}
-
-TEST_CASE ("SUB_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SubUint8Test(backends);
-}
-
-} //TEST_SUITE("ElementwiseBinary_GpuAccTests")
-
-
-
-TEST_SUITE("ElementwiseBinary_CpuAccTests")
-{
-
-TEST_CASE ("ADD_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AddFP32Test(backends);
-}
-
-TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AddBroadcastTest(backends);
-}
-
-TEST_CASE ("ADD_Activation_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AddActivationTest(backends);
-}
-
-TEST_CASE ("ADD_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AddUint8Test(backends);
-}
-
-TEST_CASE ("DIV_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DivBroadcastTest(backends);
-}
-
-TEST_CASE ("FLOORDIV_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FloorDivFP32Test(backends);
-}
-
-TEST_CASE ("MAX_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxFP32Test(backends);
-}
-
-TEST_CASE ("MAX_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxBroadcastTest(backends);
-}
-
-TEST_CASE ("MAX_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxUint8Test(backends);
-}
-
-TEST_CASE ("MIN_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MinFP32Test(backends);
-}
-
-TEST_CASE ("MIN_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MinBroadcastTest(backends);
-}
-
-TEST_CASE ("MIN_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MinUint8Test(backends);
-}
-
-TEST_CASE ("MUL_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MulFP32Test(backends);
-}
-
-TEST_CASE ("MUL_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MulBroadcastTest(backends);
-}
-
-TEST_CASE ("MUL_Actiation_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MulActivationTest(backends);
-}
-
-TEST_CASE ("MUL_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MulUint8Test(backends);
-}
-
-TEST_CASE ("SUB_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SubFP32Test(backends);
-}
-
-TEST_CASE ("SUB_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SubBroadcastTest(backends);
-}
-
-TEST_CASE ("SUB_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SubUint8Test(backends);
-}
-
-} // TEST_SUITE("ElementwiseBinary_CpuAccTests")
-
-
-TEST_SUITE("ElementwiseBinary_CpuRefTests")
-{
-
-TEST_CASE ("ADD_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AddFP32Test(backends);
-}
-
-TEST_CASE ("ADD_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AddBroadcastTest(backends);
-}
-
-TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AddConstInputTest(backends);
-}
-
-TEST_CASE ("ADD_Activation_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AddActivationTest(backends);
-}
-
-TEST_CASE ("ADD_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AddUint8Test(backends);
-}
-
-TEST_CASE ("DIV_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DivBroadcastTest(backends);
-}
-
-TEST_CASE ("FLOORDIV_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FloorDivFP32Test(backends);
-}
-
-TEST_CASE ("DIV_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DivUint8Test(backends);
-}
-
-TEST_CASE ("MAX_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxFP32Test(backends);
-}
-
-TEST_CASE ("MAX_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxBroadcastTest(backends);
-}
-
-TEST_CASE ("MAX_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxUint8Test(backends);
-}
-
-TEST_CASE ("MIN_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MinFP32Test(backends);
-}
-
-TEST_CASE ("MIN_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MinBroadcastTest(backends);
-}
-
-TEST_CASE ("MIN_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MinUint8Test(backends);
-}
-
-TEST_CASE ("MUL_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MulFP32Test(backends);
-}
-
-TEST_CASE ("MUL_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MulBroadcastTest(backends);
-}
-
-TEST_CASE ("MUL_Actiation_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MulActivationTest(backends);
-}
-
-TEST_CASE ("MUL_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MulUint8Test(backends);
-}
-
-TEST_CASE ("SUB_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SubFP32Test(backends);
-}
-
-TEST_CASE ("SUB_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SubBroadcastTest(backends);
-}
-
-TEST_CASE ("SUB_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SubUint8Test(backends);
-}
-
-} // TEST_SUITE("ElementwiseBinary_CpuRefTests")
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
deleted file mode 100644
index 09a715e7f1..0000000000
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ /dev/null
@@ -1,243 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T>
-std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator binaryOperatorCode,
- tflite::ActivationFunctionType activationType,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& input0TensorShape,
- const std::vector <int32_t>& input1TensorShape,
- const std::vector <int32_t>& outputTensorShape,
- std::vector<T>& input1Values,
- bool constantInput = false,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
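- // Buffer layout: index 0 is the empty sentinel buffer, 1 backs input_0, 2 backs input_1
- // (holding its data when constantInput is true) and 3 backs the output tensor.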
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- if (constantInput)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(input1Values.data()),
- sizeof(T) * input1Values.size())));
- }
- else
- {
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- }
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
- input1TensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("input_1"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
- flatbuffers::Offset<void> operatorBuiltinOptions = 0;
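- // Select the builtin options union matching the operator. MAXIMUM and MINIMUM take no
- // fused activation, so activationType is ignored for those cases.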
- switch (binaryOperatorCode)
- {
- case BuiltinOperator_ADD:
- {
- operatorBuiltinOptionsType = BuiltinOptions_AddOptions;
- operatorBuiltinOptions = CreateAddOptions(flatBufferBuilder, activationType).Union();
- break;
- }
- case BuiltinOperator_DIV:
- {
- operatorBuiltinOptionsType = BuiltinOptions_DivOptions;
- operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
- break;
- }
- case BuiltinOperator_MAXIMUM:
- {
- operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
- operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_MINIMUM:
- {
- operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
- operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_MUL:
- {
- operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
- operatorBuiltinOptions = CreateMulOptions(flatBufferBuilder, activationType).Union();
- break;
- }
- case BuiltinOperator_SUB:
- {
- operatorBuiltinOptionsType = BuiltinOptions_SubOptions;
- operatorBuiltinOptions = CreateSubOptions(flatBufferBuilder, activationType).Union();
- break;
- }
- case BuiltinOperator_FLOOR_DIV:
- {
- operatorBuiltinOptionsType = tflite::BuiltinOptions_FloorDivOptions;
- operatorBuiltinOptions = CreateSubOptions(flatBufferBuilder, activationType).Union();
- break;
- }
- default:
- break;
- }
- const std::vector<int32_t> operatorInputs{0, 1};
- const std::vector<int32_t> operatorOutputs{2};
- flatbuffers::Offset <Operator> elementwiseBinaryOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&elementwiseBinaryOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Binary Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, binaryOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
- tflite::ActivationFunctionType activationType,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& input0Shape,
- std::vector<int32_t>& input1Shape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& input0Values,
- std::vector<T>& input1Values,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0,
- bool constantInput = false)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel<T>(binaryOperatorCode,
- activationType,
- tensorType,
- input0Shape,
- input1Shape,
- outputShape,
- input1Values,
- constantInput,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr <Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr <Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
- if (!constantInput)
- {
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, input1Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, input1Values);
- }
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- outputShape,
- expectedOutputValues);
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/ElementwiseUnaryTest.cpp b/delegate/src/test/ElementwiseUnaryTest.cpp
deleted file mode 100644
index 4d48d6e2ed..0000000000
--- a/delegate/src/test/ElementwiseUnaryTest.cpp
+++ /dev/null
@@ -1,420 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ElementwiseUnaryTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("ElementwiseUnary_GpuAccTests")
-{
-
-TEST_CASE ("Abs_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- -0.1f, -0.2f, -0.3f,
- 0.1f, 0.2f, 0.3f
- };
- // Calculate output data
- std::vector<float> expectedOutputValues(inputValues.size());
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- expectedOutputValues[i] = std::abs(inputValues[i]);
- }
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Exp_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 5.0f, 4.0f,
- 3.0f, 2.0f,
- 1.0f, 1.1f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 148.413159102577f, 54.598150033144f,
- 20.085536923188f, 7.389056098931f,
- 2.718281828459f, 3.004166023946f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Log_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 2.71828f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.f, 0.f, 0.69314718056f,
- 1.09861228867f, 1.38629436112f, 0.99999932734f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Neg_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 0.f, 3.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- -1.f, 0.f, -3.f,
- -25.f, -64.f, -100.f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Rsqrt_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 4.f, 16.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 1.f, 0.5f, 0.25f,
- 0.2f, 0.125f, 0.1f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sin_Float32_GpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 0.0f, 1.0f, 16.0f,
- 0.5f, 36.0f, -1.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.0f, 0.8414709848f, -0.28790331666f,
- 0.4794255386f, -0.99177885344f, -0.8414709848f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
-}
-} // TEST_SUITE("ElementwiseUnary_GpuAccTests")
-
-
-
-TEST_SUITE("ElementwiseUnary_CpuAccTests")
-{
-
-TEST_CASE ("Abs_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- -0.1f, -0.2f, -0.3f,
- 0.1f, 0.2f, 0.3f
- };
- // Calculate output data
- std::vector<float> expectedOutputValues(inputValues.size());
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- expectedOutputValues[i] = std::abs(inputValues[i]);
- }
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Exp_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 5.0f, 4.0f,
- 3.0f, 2.0f,
- 1.0f, 1.1f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 148.413159102577f, 54.598150033144f,
- 20.085536923188f, 7.389056098931f,
- 2.718281828459f, 3.004166023946f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Log_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 2.71828f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.f, 0.f, 0.69314718056f,
- 1.09861228867f, 1.38629436112f, 0.99999932734f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Neg_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 0.f, 3.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- -1.f, 0.f, -3.f,
- -25.f, -64.f, -100.f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Rsqrt_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 4.f, 16.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 1.f, 0.5f, 0.25f,
- 0.2f, 0.125f, 0.1f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sin_Float32_CpuAcc_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- // Set input data
- std::vector<float> inputValues
- {
- 0.0f, 1.0f, 16.0f,
- 0.5f, 36.0f, -1.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.0f, 0.8414709848f, -0.28790331666f,
- 0.4794255386f, -0.99177885344f, -0.8414709848f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
-}
-} // TEST_SUITE("ElementwiseUnary_CpuAccTests")
-
-TEST_SUITE("ElementwiseUnary_CpuRefTests")
-{
-
-TEST_CASE ("Abs_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- -0.1f, -0.2f, -0.3f,
- 0.1f, 0.2f, 0.3f
- };
- // Calculate output data
- std::vector<float> expectedOutputValues(inputValues.size());
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- expectedOutputValues[i] = std::abs(inputValues[i]);
- }
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Exp_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 5.0f, 4.0f,
- 3.0f, 2.0f,
- 1.0f, 1.1f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 148.413159102577f, 54.598150033144f,
- 20.085536923188f, 7.389056098931f,
- 2.718281828459f, 3.004166023946f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Log_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 1.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 2.71828f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.f, 0.f, 0.69314718056f,
- 1.09861228867f, 1.38629436112f, 0.99999932734f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Neg_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 0.f, 3.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- -1.f, 0.f, -3.f,
- -25.f, -64.f, -100.f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Rsqrt_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 1.f, 4.f, 16.f,
- 25.f, 64.f, 100.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 1.f, 0.5f, 0.25f,
- 0.2f, 0.125f, 0.1f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sqrt_Float32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 9.0f, 4.25f, 81.9f,
- 0.1f, 0.9f, 169.0f
- };
- // Calculate output data
- std::vector<float> expectedOutputValues(inputValues.size());
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- expectedOutputValues[i] = std::sqrt(inputValues[i]);
- }
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, backends, inputValues, expectedOutputValues);
-}
-
-TEST_CASE ("Sin_Float32_CpuRef_Test")
-{
- // Create the ArmNN Delegate
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- // Set input data
- std::vector<float> inputValues
- {
- 0.0f, 1.0f, 16.0f,
- 0.5f, 36.0f, -1.f
- };
- // Set output data
- std::vector<float> expectedOutputValues
- {
- 0.0f, 0.8414709848f, -0.28790331666f,
- 0.4794255386f, -0.99177885344f, -0.8414709848f
- };
-
- ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
-}
-} // TEST_SUITE("ElementwiseUnary_CpuRefTests")
-
-} // namespace armnnDelegate
\ No newline at end of file
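
The expected outputs in the Exp, Log, Rsqrt and Sin cases above are simply the corresponding <cmath> reference functions applied element-wise to the inputs. As a rough cross-check, a minimal standalone sketch (not part of the test suite) that reproduces the Exp reference values:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    // Inputs used by the Exp_Float32 tests above; for example exp(5) ~ 148.413159, exp(1.1) ~ 3.004166.
    std::vector<float> inputValues { 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 1.1f };
    for (float value : inputValues)
    {
        std::printf("exp(%.1f) = %.9f\n", value, std::exp(value));
    }
    return 0;
}
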
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
deleted file mode 100644
index 230d0fcca5..0000000000
--- a/delegate/src/test/ElementwiseUnaryTestHelper.hpp
+++ /dev/null
@@ -1,189 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unaryOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
-
- // create operator
- const std::vector<int> operatorInputs{0};
- const std::vector<int> operatorOutputs{1};
- flatbuffers::Offset <Operator> unaryOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
-
- const std::vector<int> subgraphInputs{0};
- const std::vector<int> subgraphOutputs{1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&unaryOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Unary Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unaryOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues)
-{
- using namespace tflite;
- std::vector<int32_t> inputShape { { 3, 1, 2} };
- std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
- ::tflite::TensorType_FLOAT32,
- inputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
-}
-
-void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<bool>& inputValues,
- std::vector<bool>& expectedOutputValues)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
- ::tflite::TensorType_BOOL,
- inputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
- armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data. Boolean outputs are compared by calling CompareData directly rather than
- // CompareOutputData, because Boolean values are stored as a bit representation inside std::vector<bool>.
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
-
-
-
-
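
The separate Boolean path in ElementwiseUnaryBoolTest above exists because std::vector<bool> stores its elements as packed bits, so the expected values cannot be compared against the interpreter's raw bool buffer in one block; each element has to be compared individually. A small standalone illustration of that element-wise comparison (hypothetical data, unrelated to the delegate):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Expected values as the tests hold them: a bit-packed std::vector<bool>.
    std::vector<bool> expected { false, true, false, true };

    // What a plain bool tensor buffer would expose (e.g. via typed_tensor<bool>()).
    bool tensorData[] = { false, true, false, true };

    // Element-by-element comparison, mirroring what CompareData has to do.
    bool allEqual = true;
    for (std::size_t i = 0; i < expected.size(); ++i)
    {
        allEqual = allEqual && (expected[i] == tensorData[i]);
    }
    std::printf("outputs match: %s\n", allEqual ? "yes" : "no");
    return 0;
}
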
diff --git a/delegate/src/test/FillTest.cpp b/delegate/src/test/FillTest.cpp
deleted file mode 100644
index 50f7f53d56..0000000000
--- a/delegate/src/test/FillTest.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "FillTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void Fill2dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 2.0f )
-{
- std::vector<int32_t> inputShape { 2 };
- std::vector<int32_t> tensorShape { 2, 2 };
- std::vector<float> expectedOutputValues = { fill, fill,
- fill, fill };
-
- FillTest<float>(fillOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- tensorShape,
- expectedOutputValues,
- fill);
-}
-
-void Fill3dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 5.0f )
-{
- std::vector<int32_t> inputShape { 3 };
- std::vector<int32_t> tensorShape { 3, 3, 3 };
- std::vector<float> expectedOutputValues = { fill, fill, fill,
- fill, fill, fill,
- fill, fill, fill,
-
- fill, fill, fill,
- fill, fill, fill,
- fill, fill, fill,
-
- fill, fill, fill,
- fill, fill, fill,
- fill, fill, fill };
-
- FillTest<float>(fillOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- tensorShape,
- expectedOutputValues,
- fill);
-}
-
-void Fill4dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- float fill = 3.0f )
-{
- std::vector<int32_t> inputShape { 4 };
- std::vector<int32_t> tensorShape { 2, 2, 4, 4 };
- std::vector<float> expectedOutputValues = { fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
-
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
-
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
-
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill,
- fill, fill, fill, fill };
-
- FillTest<float>(fillOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- tensorShape,
- expectedOutputValues,
- fill);
-}
-
-void FillInt32Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
- int32_t fill = 2 )
-{
- std::vector<int32_t> inputShape { 2 };
- std::vector<int32_t> tensorShape { 2, 2 };
- std::vector<int32_t> expectedOutputValues = { fill, fill,
- fill, fill };
-
- FillTest<int32_t>(fillOperatorCode,
- ::tflite::TensorType_INT32,
- backends,
- inputShape,
- tensorShape,
- expectedOutputValues,
- fill);
-}
-
-TEST_SUITE("Fill_CpuRefTests")
-{
-
-TEST_CASE ("Fill2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill2dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill4d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Fill4dTest(backends);
-}
-
-TEST_CASE ("FillInt32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FillInt32Test(backends);
-}
-
-}
-
-TEST_SUITE("Fill_CpuAccTests")
-{
-
-TEST_CASE ("Fill2d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill2dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill3d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill4d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Fill4dTest(backends);
-}
-
-TEST_CASE ("FillInt32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FillInt32Test(backends);
-}
-
-}
-
-TEST_SUITE("Fill_GpuAccTests")
-{
-
-TEST_CASE ("Fill2d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill2dTest(backends);
-}
-
-TEST_CASE ("Fill3d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill3d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill3dTest(backends);
-}
-
-TEST_CASE ("Fill4d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Fill4dTest(backends);
-}
-
-TEST_CASE ("FillInt32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FillInt32Test(backends);
-}
-
-}
-
-} // namespace armnnDelegate
\ No newline at end of file
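
Every FILL case above follows the same semantics: the first input ("dims") supplies the output shape and the second input ("value") is a scalar broadcast into every element of that shape. A minimal plain-C++ sketch of the behaviour being verified (illustrative only):

#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    // Mirrors Fill2dTest above: dims = { 2, 2 }, fill value = 2.0f.
    std::vector<int> dims { 2, 2 };
    float fillValue = 2.0f;

    // The output holds product-of-dims elements, all set to the fill value.
    int numElements = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
    std::vector<float> output(numElements, fillValue);

    std::printf("filled %zu elements with %.1f\n", output.size(), fillValue);
    return 0;
}
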
diff --git a/delegate/src/test/FillTestHelper.hpp b/delegate/src/test/FillTestHelper.hpp
deleted file mode 100644
index 8479b72730..0000000000
--- a/delegate/src/test/FillTestHelper.hpp
+++ /dev/null
@@ -1,159 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T>
-std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputShape,
- const std::vector <int32_t>& tensorShape,
- const std::vector<T> fillValue)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
- sizeof(int32_t) * tensorShape.size())));
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
- sizeof(T) * fillValue.size())));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
- inputShape.size()),
- tflite::TensorType_INT32,
- 1,
- flatBufferBuilder.CreateString("dims"));
-
- std::vector<int32_t> fillShape = {};
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(fillShape.data(),
- fillShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("value"));
-
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"));
-
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateFillOptions(flatBufferBuilder).Union();
-
- // create operator
- const std::vector<int> operatorInputs{ {0, 1} };
- const std::vector<int> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> fillOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1} };
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&fillOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Fill Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- fillOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-
-}
-
-template <typename T>
-void FillTest(tflite::BuiltinOperator fillOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- std::vector<int32_t >& inputShape,
- std::vector<int32_t >& tensorShape,
- std::vector<T>& expectedOutputValues,
- T fillValue)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
- tensorType,
- inputShape,
- tensorShape,
- {fillValue});
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-}
-
-} // anonymous namespace
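
For orientation, a hypothetical direct call to the model builder above, using the same values as Fill2dTest (the tests themselves go through FillTest<T> rather than calling this function directly):

std::vector<int32_t> inputShape  { 2 };     // shape of the "dims" input: one entry per output dimension
std::vector<int32_t> tensorShape { 2, 2 };  // output shape, stored in the constant "dims" buffer
std::vector<char> modelBuffer = CreateFillTfLiteModel<float>(tflite::BuiltinOperator_FILL,
                                                             ::tflite::TensorType_FLOAT32,
                                                             inputShape,
                                                             tensorShape,
                                                             { 2.0f }); // fill value, stored as a scalar constant
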
diff --git a/delegate/src/test/FullyConnectedTest.cpp b/delegate/src/test/FullyConnectedTest.cpp
deleted file mode 100644
index 3ef5cedbd7..0000000000
--- a/delegate/src/test/FullyConnectedTest.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "FullyConnectedTestHelper.hpp"
-
-namespace
-{
-
-void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
-{
- std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
- std::vector<int32_t> weightsTensorShape { 1, 4 };
- std::vector<int32_t> biasTensorShape { 1 };
- std::vector<int32_t> outputTensorShape { 1, 1 };
-
- std::vector<float> inputValues = { 10, 20, 30, 40 };
- std::vector<float> weightsData = { 2, 3, 4, 5 };
-
- std::vector<float> expectedOutputValues = { (400 + 10) };
-
- // bias is set as std::vector<float> biasData = { 10 } in the model
- FullyConnectedTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
- tflite::ActivationFunctionType_NONE,
- inputTensorShape,
- weightsTensorShape,
- biasTensorShape,
- outputTensorShape,
- inputValues,
- expectedOutputValues,
- weightsData,
- constantWeights);
-}
-
-void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
-{
- std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
- std::vector<int32_t> weightsTensorShape { 1, 4 };
- std::vector<int32_t> biasTensorShape { 1 };
- std::vector<int32_t> outputTensorShape { 1, 1 };
-
- std::vector<float> inputValues = { -10, 20, 30, 40 };
- std::vector<float> weightsData = { 2, 3, 4, -5 };
-
- std::vector<float> expectedOutputValues = { 0 };
-
- // bias is set as std::vector<float> biasData = { 10 } in the model
- FullyConnectedTest<float>(backends,
- ::tflite::TensorType_FLOAT32,
- tflite::ActivationFunctionType_RELU,
- inputTensorShape,
- weightsTensorShape,
- biasTensorShape,
- outputTensorShape,
- inputValues,
- expectedOutputValues,
- weightsData,
- constantWeights);
-}
-
-void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
-{
- std::vector<int32_t> inputTensorShape { 1, 4, 2, 1 };
- std::vector<int32_t> weightsTensorShape { 1, 4 };
- std::vector<int32_t> biasTensorShape { 1 };
- std::vector<int32_t> outputTensorShape { 2, 1 };
-
- std::vector<int8_t> inputValues = { 1, 2, 3, 4, 5, 10, 15, 20 };
- std::vector<int8_t> weightsData = { 2, 3, 4, 5 };
-
- std::vector<int8_t> expectedOutputValues = { 25, 105 }; // (40 + 10) / 2, (200 + 10) / 2
-
- // bias is set as std::vector<int32_t> biasData = { 10 } in the model
- // input and weights use quantization scale 1.0f and offset 0 in the model
- // output uses quantization scale 2.0f and offset 0 in the model
- FullyConnectedTest<int8_t>(backends,
- ::tflite::TensorType_INT8,
- tflite::ActivationFunctionType_NONE,
- inputTensorShape,
- weightsTensorShape,
- biasTensorShape,
- outputTensorShape,
- inputValues,
- expectedOutputValues,
- weightsData,
- constantWeights);
-}
-
-TEST_SUITE("FullyConnected_GpuAccTests")
-{
-
-TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedFp32Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedInt8Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- FullyConnectedActivationTest(backends);
-}
-
-} // End of TEST_SUITE("FullyConnected_GpuAccTests")
-
-TEST_SUITE("FullyConnected_CpuAccTests")
-{
-
-TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedFp32Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedInt8Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- FullyConnectedActivationTest(backends);
-}
-
-} // End of TEST_SUITE("FullyConnected_CpuAccTests")
-
-TEST_SUITE("FullyConnected_CpuRefTests")
-{
-
-TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedFp32Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedInt8Test(backends);
-}
-
-TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedActivationTest(backends);
-}
-
-TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedFp32Test(backends, false);
-}
-
-TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedInt8Test(backends, false);
-}
-
-TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- FullyConnectedActivationTest(backends, false);
-}
-
-} // End of TEST_SUITE("FullyConnected_CpuRefTests")
-
-} // anonymous namespace
\ No newline at end of file
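
The activation case above can be checked by hand: the accumulation is (-10 * 2) + (20 * 3) + (30 * 4) + (40 * -5) = -40, adding the bias of 10 gives -30, and the RELU activation clamps that to the single expected output of 0. A minimal standalone check (illustrative, not part of the suite):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Values from FullyConnectedActivationTest above.
    std::vector<float> input   { -10.f, 20.f, 30.f, 40.f };
    std::vector<float> weights {   2.f,  3.f,  4.f, -5.f };
    float bias = 10.f;

    float accumulator = bias;
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        accumulator += input[i] * weights[i];
    }
    float activated = std::max(accumulator, 0.f);   // -30 clamped to 0 by RELU
    std::printf("accumulator = %.1f, after RELU = %.1f\n", accumulator, activated);
    return 0;
}
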
diff --git a/delegate/src/test/FullyConnectedTestHelper.hpp b/delegate/src/test/FullyConnectedTestHelper.hpp
deleted file mode 100644
index a3f009a863..0000000000
--- a/delegate/src/test/FullyConnectedTestHelper.hpp
+++ /dev/null
@@ -1,255 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T>
-std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
- tflite::ActivationFunctionType activationType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& weightsTensorShape,
- const std::vector <int32_t>& biasTensorShape,
- std::vector <int32_t>& outputTensorShape,
- std::vector <T>& weightsData,
- bool constantWeights = true,
- float quantScale = 1.0f,
- int quantOffset = 0,
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
- buffers[1] = CreateBuffer(flatBufferBuilder);
-
- auto biasTensorType = ::tflite::TensorType_FLOAT32;
- if (tensorType == ::tflite::TensorType_INT8)
- {
- biasTensorType = ::tflite::TensorType_INT32;
- }
- if (constantWeights)
- {
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(weightsData.data()),
- sizeof(T) * weightsData.size()));
-
- if (tensorType == ::tflite::TensorType_INT8)
- {
- std::vector<int32_t> biasData = { 10 };
- buffers[3] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
- sizeof(int32_t) * biasData.size()));
-
- }
- else
- {
- std::vector<float> biasData = { 10 };
- buffers[3] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
- sizeof(float) * biasData.size()));
- }
- }
- else
- {
- buffers[2] = CreateBuffer(flatBufferBuilder);
- buffers[3] = CreateBuffer(flatBufferBuilder);
- }
- buffers[4] = CreateBuffer(flatBufferBuilder);
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto outputQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(weightsTensorShape.data(),
- weightsTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("weights"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(),
- biasTensorShape.size()),
- biasTensorType,
- 3,
- flatBufferBuilder.CreateString("bias"),
- quantizationParameters);
-
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 4,
- flatBufferBuilder.CreateString("output"),
- outputQuantizationParameters);
-
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FullyConnectedOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateFullyConnectedOptions(flatBufferBuilder,
- activationType,
- FullyConnectedOptionsWeightsFormat_DEFAULT, false).Union();
-
- const std::vector<int> operatorInputs{0, 1, 2};
- const std::vector<int> operatorOutputs{3};
- flatbuffers::Offset <Operator> fullyConnectedOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType, operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0, 1, 2};
- const std::vector<int> subgraphOutputs{3};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&fullyConnectedOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: FullyConnected Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_FULLY_CONNECTED);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
- tflite::ActivationFunctionType activationType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& weightsTensorShape,
- const std::vector <int32_t>& biasTensorShape,
- std::vector <int32_t>& outputTensorShape,
- std::vector <T>& inputValues,
- std::vector <T>& expectedOutputValues,
- std::vector <T>& weightsData,
- bool constantWeights = true,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer = CreateFullyConnectedTfLiteModel(tensorType,
- activationType,
- inputTensorShape,
- weightsTensorShape,
- biasTensorShape,
- outputTensorShape,
- weightsData,
- constantWeights,
- quantScale,
- quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- if (!constantWeights)
- {
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, weightsData);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, weightsData);
-
- if (tensorType == ::tflite::TensorType_INT8)
- {
- std::vector <int32_t> biasData = {10};
- armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 2, biasData);
- armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 2, biasData);
- }
- else
- {
- std::vector<float> biasData = {10};
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 2, biasData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 2, biasData);
- }
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- outputTensorShape,
- expectedOutputValues);
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
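
With the quantization parameters set up by this helper (input and weights scale 1.0, offset 0; output scale 2.0, offset 0; int32 bias of 10), the int8 expected outputs of { 25, 105 } follow from the usual per-tensor affine requantization, i.e. round(accumulator * inputScale * weightScale / outputScale) + outputOffset. A small sketch of that arithmetic for the first output row (assuming the standard scheme, not the delegate's actual kernel):

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Quantization parameters as set by CreateFullyConnectedTfLiteModel above.
    const float inputScale   = 1.0f;
    const float weightScale  = 1.0f;
    const float outputScale  = 2.0f;
    const int   outputOffset = 0;

    // First output row of FullyConnectedInt8Test: input { 1, 2, 3, 4 }, weights { 2, 3, 4, 5 }, bias 10.
    std::vector<std::int8_t> input   { 1, 2, 3, 4 };
    std::vector<std::int8_t> weights { 2, 3, 4, 5 };
    std::int32_t bias = 10;

    std::int32_t accumulator = bias;
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        accumulator += static_cast<std::int32_t>(input[i]) * static_cast<std::int32_t>(weights[i]);   // 10 + 40 = 50
    }

    // Requantize to the output scale: 50 * (1.0 * 1.0) / 2.0 = 25, matching the expected output.
    std::int32_t output = static_cast<std::int32_t>(std::lround(accumulator * inputScale * weightScale / outputScale))
                          + outputOffset;
    std::printf("quantized output = %d\n", static_cast<int>(output));
    return 0;
}
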
diff --git a/delegate/src/test/GatherNdTest.cpp b/delegate/src/test/GatherNdTest.cpp
deleted file mode 100644
index 2b4fd4207e..0000000000
--- a/delegate/src/test/GatherNdTest.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GatherNdTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// GATHER_ND Operator
-void GatherNdUint8Test(std::vector<armnn::BackendId>& backends)
-{
-
- std::vector<int32_t> paramsShape{ 5, 2 };
- std::vector<int32_t> indicesShape{ 3, 1 };
- std::vector<int32_t> expectedOutputShape{ 3, 2 };
-
- std::vector<uint8_t> paramsValues{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
- std::vector<int32_t> indicesValues{ 1, 0, 4 };
- std::vector<uint8_t> expectedOutputValues{ 3, 4, 1, 2, 9, 10 };
-
- GatherNdTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- paramsValues,
- indicesValues,
- expectedOutputValues);
-}
-
-void GatherNdFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> paramsShape{ 5, 2 };
- std::vector<int32_t> indicesShape{ 3, 1 };
- std::vector<int32_t> expectedOutputShape{ 3, 2 };
-
- std::vector<float> paramsValues{ 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.10f };
- std::vector<int32_t> indicesValues{ 1, 0, 4 };
- std::vector<float> expectedOutputValues{ 3.3f, 4.4f, 1.1f, 2.2f, 9.9f, 10.10f };
-
- GatherNdTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- paramsValues,
- indicesValues,
- expectedOutputValues);
-}
-
-// GATHER_ND Test Suite
-TEST_SUITE("GATHER_ND_CpuRefTests")
-{
-
-TEST_CASE ("GATHER_ND_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherNdUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_ND_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherNdFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_ND_CpuAccTests")
-{
-
-TEST_CASE ("GATHER_ND_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherNdUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_ND_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherNdFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_ND_GpuAccTests")
-{
-
-TEST_CASE ("GATHER_ND_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherNdUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_ND_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherNdFp32Test(backends);
-}
-
-}
-// End of GATHER_ND Test Suite
-
-} // namespace armnnDelegate
\ No newline at end of file
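
The GATHER_ND expected values above follow directly from the indices: with params of shape { 5, 2 } and indices of shape { 3, 1 }, each index entry selects one whole row, so indices { 1, 0, 4 } pick rows 1, 0 and 4 and the output is { 3, 4, 1, 2, 9, 10 }. A minimal sketch of that row gather (covering only the one-index-per-row case used by these tests):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Data from GatherNdUint8Test above: params shape { 5, 2 }, indices shape { 3, 1 }.
    std::vector<std::uint8_t> params  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    std::vector<std::int32_t> indices { 1, 0, 4 };
    const std::size_t rowLength = 2;

    std::vector<std::uint8_t> output;
    for (std::int32_t rowIndex : indices)
    {
        // Each index entry copies one complete row of params into the output.
        for (std::size_t j = 0; j < rowLength; ++j)
        {
            output.push_back(params[static_cast<std::size_t>(rowIndex) * rowLength + j]);
        }
    }

    for (std::uint8_t value : output)
    {
        std::printf("%u ", static_cast<unsigned>(value));   // prints: 3 4 1 2 9 10
    }
    std::printf("\n");
    return 0;
}
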
diff --git a/delegate/src/test/GatherNdTestHelper.hpp b/delegate/src/test/GatherNdTestHelper.hpp
deleted file mode 100644
index c2cf9ffe9d..0000000000
--- a/delegate/src/test/GatherNdTestHelper.hpp
+++ /dev/null
@@ -1,181 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
- std::vector<int32_t>& paramsShape,
- std::vector<int32_t>& indicesShape,
- const std::vector<int32_t>& expectedOutputShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
- paramsShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("params"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
- indicesShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("indices"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
- expectedOutputShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherNdOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateGatherNdOptions(flatBufferBuilder).Union();
-
- const std::vector<int> operatorInputs{{0, 1}};
- const std::vector<int> operatorOutputs{2};
- flatbuffers::Offset<Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
- operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{{0, 1}};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
- subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
- subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: GATHER_ND Operator Model");
- flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- BuiltinOperator_GATHER_ND);
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T>
-void GatherNdTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& paramsShape,
- std::vector<int32_t>& indicesShape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& paramsValues,
- std::vector<int32_t>& indicesValues,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- quantScale,
- quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify the armnnDelegate interpreter to use theArmnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
- armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
- armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-}
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/GatherTest.cpp b/delegate/src/test/GatherTest.cpp
deleted file mode 100644
index 6dd015173c..0000000000
--- a/delegate/src/test/GatherTest.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GatherTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// GATHER Operator
-void GatherUint8Test(std::vector<armnn::BackendId>& backends)
-{
-
- std::vector<int32_t> paramsShape{8};
- std::vector<int32_t> indicesShape{3};
- std::vector<int32_t> expectedOutputShape{3};
-
- int32_t axis = 0;
- std::vector<uint8_t> paramsValues{1, 2, 3, 4, 5, 6, 7, 8};
- std::vector<int32_t> indicesValues{7, 6, 5};
- std::vector<uint8_t> expectedOutputValues{8, 7, 6};
-
- GatherTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- axis,
- paramsValues,
- indicesValues,
- expectedOutputValues);
-}
-
-void GatherFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> paramsShape{8};
- std::vector<int32_t> indicesShape{3};
- std::vector<int32_t> expectedOutputShape{3};
-
- int32_t axis = 0;
- std::vector<float> paramsValues{1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f};
- std::vector<int32_t> indicesValues{7, 6, 5};
- std::vector<float> expectedOutputValues{8.8f, 7.7f, 6.6f};
-
- GatherTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- axis,
- paramsValues,
- indicesValues,
- expectedOutputValues);
-}
-
-// GATHER Test Suite
-TEST_SUITE("GATHER_CpuRefTests")
-{
-
-TEST_CASE ("GATHER_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- GatherFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_CpuAccTests")
-{
-
-TEST_CASE ("GATHER_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- GatherFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("GATHER_GpuAccTests")
-{
-
-TEST_CASE ("GATHER_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherUint8Test(backends);
-}
-
-TEST_CASE ("GATHER_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- GatherFp32Test(backends);
-}
-
-}
-// End of GATHER Test Suite
-
-} // namespace armnnDelegate
\ No newline at end of file
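
GATHER with axis 0 over a rank-1 params tensor is plain index lookup: indices { 7, 6, 5 } select params[7], params[6] and params[5], giving { 8, 7, 6 } for the uint8 case and { 8.8, 7.7, 6.6 } for the float case. A minimal sketch (illustrative only):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Data from GatherFp32Test above: axis 0, rank-1 params.
    std::vector<float> params  { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f };
    std::vector<int>   indices { 7, 6, 5 };

    std::vector<float> output;
    for (int index : indices)
    {
        output.push_back(params[static_cast<std::size_t>(index)]);
    }

    for (float value : output)
    {
        std::printf("%.1f ", value);   // prints: 8.8 7.7 6.6
    }
    std::printf("\n");
    return 0;
}
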
diff --git a/delegate/src/test/GatherTestHelper.hpp b/delegate/src/test/GatherTestHelper.hpp
deleted file mode 100644
index 4763e06c73..0000000000
--- a/delegate/src/test/GatherTestHelper.hpp
+++ /dev/null
@@ -1,184 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
- std::vector<int32_t>& paramsShape,
- std::vector<int32_t>& indicesShape,
- const std::vector<int32_t>& expectedOutputShape,
- int32_t axis,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
- paramsShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("params"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
- indicesShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("indices"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
- expectedOutputShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateGatherOptions(flatBufferBuilder).Union();
-
- const std::vector<int> operatorInputs{{0, 1}};
- const std::vector<int> operatorOutputs{2};
- flatbuffers::Offset<Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
- operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{{0, 1}};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
- subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
- subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: GATHER Operator Model");
- flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- BuiltinOperator_GATHER);
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T>
-void GatherTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& paramsShape,
- std::vector<int32_t>& indicesShape,
- std::vector<int32_t>& expectedOutputShape,
- int32_t axis,
- std::vector<T>& paramsValues,
- std::vector<int32_t>& indicesValues,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateGatherTfLiteModel(tensorType,
- paramsShape,
- indicesShape,
- expectedOutputShape,
- axis,
- quantScale,
- quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify the armnnDelegate interpreter to use theArmnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
- armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
- armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-}
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/LogicalTest.cpp b/delegate/src/test/LogicalTest.cpp
deleted file mode 100644
index 9fa2d3dde0..0000000000
--- a/delegate/src/test/LogicalTest.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ElementwiseUnaryTestHelper.hpp"
-#include "LogicalTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void LogicalBinaryAndBoolTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
-
- // Set input and output values
- std::vector<bool> input0Values { 0, 0, 1, 1 };
- std::vector<bool> input1Values { 0, 1, 0, 1 };
- std::vector<bool> expectedOutputValues { 0, 0, 0, 1 };
-
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
-
- std::vector<bool> input0Values { 0, 1, 0, 1 };
- std::vector<bool> input1Values { 1 };
- std::vector<bool> expectedOutputValues { 0, 1, 0, 1 };
-
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 2, 2 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
-
- std::vector<bool> input0Values { 0, 0, 1, 1 };
- std::vector<bool> input1Values { 0, 1, 0, 1 };
- std::vector<bool> expectedOutputValues { 0, 1, 1, 1 };
-
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> input0Shape { 1, 2, 2 };
- std::vector<int32_t> input1Shape { 1, 1, 1 };
- std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
-
- std::vector<bool> input0Values { 0, 1, 0, 1 };
- std::vector<bool> input1Values { 1 };
- std::vector<bool> expectedOutputValues { 1, 1, 1, 1 };
-
- LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
- ::tflite::TensorType_BOOL,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues);
-}
-
-// The LogicalNot operator uses the ElementwiseUnary layer and descriptor, but is still classed as a logical operator.
-void LogicalNotBoolTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 1, 2, 2 };
-
- std::vector<bool> inputValues { 0, 1, 0, 1 };
- std::vector<bool> expectedOutputValues { 1, 0, 1, 0 };
-
- ElementwiseUnaryBoolTest(tflite::BuiltinOperator_LOGICAL_NOT,
- backends,
- inputShape,
- inputValues,
- expectedOutputValues);
-}
-
-TEST_SUITE("LogicalBinaryTests_GpuAccTests")
-{
-
-TEST_CASE ("LogicalBinary_AND_Bool_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LogicalBinaryAndBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_AND_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LogicalBinaryAndBroadcastTest(backends);
-}
-
-TEST_CASE ("Logical_NOT_Bool_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LogicalNotBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Bool_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LogicalBinaryOrBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Broadcast_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LogicalBinaryOrBroadcastTest(backends);
-}
-
-}
-
-
-TEST_SUITE("LogicalBinaryTests_CpuAccTests")
-{
-
-TEST_CASE ("LogicalBinary_AND_Bool_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LogicalBinaryAndBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_AND_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LogicalBinaryAndBroadcastTest(backends);
-}
-
-TEST_CASE ("Logical_NOT_Bool_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LogicalNotBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Bool_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LogicalBinaryOrBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Broadcast_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LogicalBinaryOrBroadcastTest(backends);
-}
-
-}
-
-
-TEST_SUITE("LogicalBinaryTests_CpuRefTests")
-{
-
-TEST_CASE ("LogicalBinary_AND_Bool_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LogicalBinaryAndBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_AND_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LogicalBinaryAndBroadcastTest(backends);
-}
-
-TEST_CASE ("Logical_NOT_Bool_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LogicalNotBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Bool_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LogicalBinaryOrBoolTest(backends);
-}
-
-TEST_CASE ("LogicalBinary_OR_Broadcast_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LogicalBinaryOrBroadcastTest(backends);
-}
-
-}
-
-} // namespace armnnDelegate \ No newline at end of file
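The LogicalNot cases in the removed LogicalTest.cpp above reuse ElementwiseUnaryBoolTest because the delegate lowers LOGICAL_NOT onto ArmNN's ElementwiseUnary layer rather than onto the LogicalBinary layer used for AND/OR. A minimal sketch of that lowering, assuming ArmNN's public INetwork and descriptor API (the helper function name is illustrative only):

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

// Sketch: LOGICAL_NOT maps to the ElementwiseUnary layer with the LogicalNot
// unary operation; LOGICAL_AND and LOGICAL_OR map to the LogicalBinary layer.
void AddLogicalNotLayer(armnn::INetwork& network)
{
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::LogicalNot);
    network.AddElementwiseUnaryLayer(descriptor, "LOGICAL_NOT");
}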
diff --git a/delegate/src/test/LogicalTestHelper.hpp b/delegate/src/test/LogicalTestHelper.hpp
deleted file mode 100644
index 2a1ff2b996..0000000000
--- a/delegate/src/test/LogicalTestHelper.hpp
+++ /dev/null
@@ -1,201 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logicalOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& input0TensorShape,
- const std::vector <int32_t>& input1TensorShape,
- const std::vector <int32_t>& outputTensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
- input1TensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("input_1"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
- flatbuffers::Offset<void> operatorBuiltinOptions = 0;
- switch (logicalOperatorCode)
- {
- case BuiltinOperator_LOGICAL_AND:
- {
- operatorBuiltinOptionsType = BuiltinOptions_LogicalAndOptions;
- operatorBuiltinOptions = CreateLogicalAndOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_LOGICAL_OR:
- {
- operatorBuiltinOptionsType = BuiltinOptions_LogicalOrOptions;
- operatorBuiltinOptions = CreateLogicalOrOptions(flatBufferBuilder).Union();
- break;
- }
- default:
- break;
- }
- const std::vector<int32_t> operatorInputs{ {0, 1} };
- const std::vector<int32_t> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> logicalBinaryOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1} };
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&logicalBinaryOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Logical Binary Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, logicalOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& input0Shape,
- std::vector<int32_t>& input1Shape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& input0Values,
- std::vector<T>& input1Values,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
- tensorType,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for the armnn interpreter
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
-
- // Set input data for the tflite interpreter
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data. Boolean outputs have to be compared by calling CompareData directly on the raw buffers,
-    // because std::vector<bool> stores Boolean values as a packed bit representation.
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
- armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
- armnnDelegateInterpreter.reset(nullptr);
- tfLiteInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
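The Boolean-comparison note in the removed LogicalTestHelper.hpp above comes down to the bit-packed std::vector<bool> specialisation: its elements are not addressable as a contiguous array of bool, so the helper compares the raw typed_tensor<bool> buffers with CompareData instead of copying them into vectors first. A small self-contained illustration of that property (not taken from the original sources):

#include <type_traits>
#include <vector>

int main()
{
    std::vector<bool> packed { true, false, true };

    // operator[] on std::vector<bool> returns a proxy object rather than bool&,
    // because the elements are stored as packed bits, not as individual bools.
    static_assert(!std::is_same<decltype(packed[0]), bool&>::value,
                  "std::vector<bool> does not expose references to bool");
    return 0;
}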
diff --git a/delegate/src/test/LstmTest.cpp b/delegate/src/test/LstmTest.cpp
deleted file mode 100644
index 1fa9f0c8bf..0000000000
--- a/delegate/src/test/LstmTest.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "LstmTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void LstmTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 2;
- int32_t inputSize = 2;
- int32_t outputSize = 4;
-    // The cell state size (numUnits) equals outputSize when there is no projection.
- int32_t numUnits = outputSize;
-
- std::vector<int32_t> inputShape {batchSize , inputSize};
- std::vector<int32_t> cellStateInTensorInfo {batchSize , numUnits};
- std::vector<int32_t> outputStateInTensorInfo {batchSize , outputSize};
-
- std::vector<int32_t> scratchBufferTensorInfo {batchSize, numUnits * 4};
- std::vector<int32_t> cellStateOutTensorInfo {batchSize, numUnits};
- std::vector<int32_t> outputStateOutTensorInfo {batchSize, outputSize};
- std::vector<int32_t> outputTensorInfo {batchSize, outputSize};
-
- std::vector<int32_t> tensorInfo4 {numUnits};
- std::vector<int32_t> tensorInfo8 {numUnits, 2};
- std::vector<int32_t> tensorInfo16 {numUnits, 4};
-
-    // Input-to-gate weights, shape tensorInfo8 ({numUnits, inputSize})
- bool hasInputToInputWeights = true;
- std::vector<float> inputToInputWeights {-0.45018822f, -0.02338299f, -0.0870589f,
- -0.34550029f, 0.04266912f, -0.15680569f,
- -0.34856534f, 0.43890524f};
-
- std::vector<float> inputToForgetWeights {0.09701663f, 0.20334584f, -0.50592935f,
- -0.31343272f, -0.40032279f, 0.44781327f,
- 0.01387155f, -0.35593212f};
-
- std::vector<float> inputToCellWeights {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
- -0.20583314f, 0.44344562f, 0.22077113f,
- -0.29909778f};
-
- std::vector<float> inputToOutputWeights {-0.25065863f, -0.28290087f, 0.04613829f,
- 0.40525138f, 0.44272184f, 0.03897077f,
- -0.1556896f, 0.19487578f};
-
-    // Recurrent-to-gate weights, shape tensorInfo16 ({numUnits, outputSize})
- bool hasRecurrentToInputWeights = true;
- std::vector<float> recurrentToInputWeights {-0.0063535f, -0.2042388f, 0.31454784f,
- -0.35746509f, 0.28902304f, 0.08183324f,
- -0.16555229f, 0.02286911f, -0.13566875f,
- 0.03034258f, 0.48091322f, -0.12528998f,
- 0.24077177f, -0.51332325f, -0.33502164f,
- 0.10629296f};
-
- std::vector<float> recurrentToForgetWeights {-0.48684245f, -0.06655136f, 0.42224967f,
- 0.2112639f, 0.27654213f, 0.20864892f,
- -0.07646349f, 0.45877004f, 0.00141793f,
- -0.14609534f, 0.36447752f, 0.09196436f,
- 0.28053468f, 0.01560611f, -0.20127171f,
- -0.01140004f};
-
- std::vector<float> recurrentToCellWeights {-0.3407414f, 0.24443203f, -0.2078532f,
- 0.26320225f, 0.05695659f, -0.00123841f,
- -0.4744786f, -0.35869038f, -0.06418842f,
- -0.13502428f, -0.501764f, 0.22830659f,
- -0.46367589f, 0.26016325f, -0.03894562f,
- -0.16368064f};
-
- std::vector<float> recurrentToOutputWeights {0.43385774f, -0.17194885f, 0.2718237f,
- 0.09215671f, 0.24107647f, -0.39835793f,
- 0.18212086f, 0.01301402f, 0.48572797f,
- -0.50656658f, 0.20047462f, -0.20607421f,
- -0.51818722f, -0.15390486f, 0.0468148f,
- 0.39922136f};
-    // Peephole weights and gate biases, shape tensorInfo4 ({numUnits})
- bool hasCellToInputWeights = false;
- std::vector<float> cellToInputWeights {};
- bool hasCellToForgetWeights = false;
- std::vector<float> cellToForgetWeights {};
- bool hasCellToOutputWeights = false;
- std::vector<float> cellToOutputWeights {};
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias {0., 0., 0., 0.};
- std::vector<float> forgetGateBias {1., 1., 1., 1.};
- std::vector<float> cellBias {0., 0., 0., 0.};
- std::vector<float> outputGateBias {0., 0., 0., 0.};
-
- bool hasProjectionWeights = false;
- std::vector<float> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues {2., 3., 3., 4.};
- std::vector<float> expectedOutputValues {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
- -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f};
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 0.f;
- float clippingThresProj = 0.f;
-
- LstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj);
-}
-
-TEST_SUITE("LstmTest_CpuRefTests")
-{
-
-TEST_CASE ("LstmTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- LstmTest(backends);
-}
-
-} // End of TEST_SUITE("LstmTest_CpuRefTests")
-
-TEST_SUITE("LstmTest_CpuAccTests")
-{
-
-TEST_CASE ("LstmTest_CpuAcc_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- LstmTest(backends);
-}
-
-} // End of TEST_SUITE("LstmTest_CpuAccTests")
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/LstmTestHelper.hpp b/delegate/src/test/LstmTestHelper.hpp
deleted file mode 100644
index 082d5dea91..0000000000
--- a/delegate/src/test/LstmTestHelper.hpp
+++ /dev/null
@@ -1,691 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T>
-std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
- int32_t batchSize,
- int32_t inputSize,
- int32_t outputSize,
- int32_t numUnits,
- bool hasInputToInputWeights,
- const std::vector<T>& inputToInputWeights,
- const std::vector<T>& inputToForgetWeights,
- const std::vector<T>& inputToCellWeights,
- const std::vector<T>& inputToOutputWeights,
- bool hasRecurrentToInputWeights,
- const std::vector<T>& recurrentToInputWeights,
- const std::vector<T>& recurrentToForgetWeights,
- const std::vector<T>& recurrentToCellWeights,
- const std::vector<T>& recurrentToOutputWeights,
- bool hasCellToInputWeights,
- const std::vector<T>& cellToInputWeights,
- bool hasCellToForgetWeights,
- const std::vector<T>& cellToForgetWeights,
- bool hasCellToOutputWeights,
- const std::vector<T>& cellToOutputWeights,
- bool hasInputGateBias,
- const std::vector<T>& inputGateBias,
- const std::vector<T>& forgetGateBias,
- const std::vector<T>& cellBias,
- const std::vector<T>& outputGateBias,
- bool hasProjectionWeights,
- const std::vector<T>& projectionWeights,
- bool hasProjectionBias,
- const std::vector<T>& projectionBias,
- bool hasInputLayerNormWeights,
- const std::vector<T>& inputLayerNormWeights,
- bool hasForgetLayerNormWeights,
- const std::vector<T>& forgetLayerNormWeights,
- bool hasCellLayerNormWeights,
- const std::vector<T>& cellLayerNormWeights,
- bool hasOutputLayerNormWeights,
- const std::vector<T>& outputLayerNormWeights,
- tflite::ActivationFunctionType activationFunction,
- float clippingThresCell,
- float clippingThresProj,
- float quantScale = 1.0f,
- int quantOffset = 0,
- float outputQuantScale = 2.0f,
- int outputQuantOffset = 0)
-{
-
- std::vector <int32_t> tensorInfo0 {};
- std::vector <int32_t> tensorInfo4 {numUnits};
- std::vector <int32_t> tensorInfo8 {numUnits, static_cast<int32_t>(2)};
- std::vector <int32_t> tensorInfo16 {numUnits, static_cast<int32_t>(4)};
-
- std::vector<int32_t> inputShape {batchSize , inputSize};
- std::vector<int32_t> outputShape {batchSize , outputSize};
-
- std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
- std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
-
- std::vector<int> operatorInputs;
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- std::vector<flatbuffers::Offset<Tensor>> tensors;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto outputQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
- inputShape.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("input_0"),
- quantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- if (hasInputToInputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToInputWeights.data()),
- sizeof(T) * inputToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
- tensorInfo8.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToInputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToForgetWeights.data()),
- sizeof(T) * inputToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
- tensorInfo8.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToForgetWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToCellWeights.data()),
- sizeof(T) * inputToCellWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
- tensorInfo8.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToCellWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToOutputWeights.data()),
- sizeof(T) * inputToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
- tensorInfo8.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToOutputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- if (hasRecurrentToInputWeights)
- {
- buffers.push_back(CreateBuffer(
- flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
- sizeof(T) * recurrentToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
- tensorInfo16.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToInputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToForgetWeights.data()),
- sizeof(T) * recurrentToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
- tensorInfo16.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToForgetWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToCellWeights.data()),
- sizeof(T) * recurrentToCellWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
- tensorInfo16.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToCellWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToOutputWeights.data()),
- sizeof(T) * recurrentToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
- tensorInfo16.size()),
- tensorType,
- buffers.size() - 1 ,
- flatBufferBuilder.CreateString("recurrentToOutputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- if (hasCellToInputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
- sizeof(T) * cellToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToInputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellToForgetWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
- sizeof(T) * cellToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToForgetWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellToOutputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
- sizeof(T) * cellToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToOutputWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasInputGateBias)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
- sizeof(T) * inputGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputGateBias"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(forgetGateBias.data()),
- sizeof(T) * forgetGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("forgetGateBias"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellBias.data()),
- sizeof(T) * cellBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellBias"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(outputGateBias.data()),
- sizeof(T) * outputGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputGateBias"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
-
- if (hasProjectionWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionWeights.data()),
- sizeof(T) * projectionWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
-                                       flatBufferBuilder.CreateString("projectionWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasProjectionBias)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionBias.data()),
- sizeof(T) * projectionBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("projectionBias"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
- outputStateInDimensions.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputStateInInfo"),
- outputQuantizationParameters,
- true));
- operatorInputs.push_back(buffers.size() - 1);
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
- cellStateInDimensions.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellStateInInfo"),
- outputQuantizationParameters,
- true));
- operatorInputs.push_back(buffers.size() - 1);
-
- if (hasInputLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(inputLayerNormWeights.data()),
- sizeof(T) * inputLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputLayerNormWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasForgetLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(forgetLayerNormWeights.data()),
- sizeof(T) * forgetLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("forgetLayerNormWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellLayerNormWeights.data()),
- sizeof(T) * cellLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellLayerNormWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasOutputLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(outputLayerNormWeights.data()),
- sizeof(T) * outputLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
- tensorInfo4.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputLayerNormWeights"),
- outputQuantizationParameters));
- operatorInputs.push_back(buffers.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
- int outputBufferId = buffers.size();
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
- outputShape.size()),
- tensorType,
- outputBufferId,
- flatBufferBuilder.CreateString("output"),
- outputQuantizationParameters));
- std::vector<int> operatorOutputs;
- operatorOutputs.push_back(buffers.size() - 1);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_LSTMOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateLSTMOptions(flatBufferBuilder,
- activationFunction,
- clippingThresCell,
- clippingThresProj).Union();
-
- flatbuffers::Offset <Operator> lstmOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType, operatorBuiltinOptions);
-
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- flatBufferBuilder.CreateVector(&lstmOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: LSTM Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_LSTM);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void LstmTestImpl(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
- int32_t batchSize,
- int32_t inputSize,
- int32_t outputSize,
- int32_t numUnits,
- bool hasInputToInputWeights,
- const std::vector<T>& inputToInputWeights,
- const std::vector<T>& inputToForgetWeights,
- const std::vector<T>& inputToCellWeights,
- const std::vector<T>& inputToOutputWeights,
- bool hasRecurrentToInputWeights,
- const std::vector<T>& recurrentToInputWeights,
- const std::vector<T>& recurrentToForgetWeights,
- const std::vector<T>& recurrentToCellWeights,
- const std::vector<T>& recurrentToOutputWeights,
- bool hasCellToInputWeights,
- const std::vector<T>& cellToInputWeights,
- bool hasCellToForgetWeights,
- const std::vector<T>& cellToForgetWeights,
- bool hasCellToOutputWeights,
- const std::vector<T>& cellToOutputWeights,
- bool hasInputGateBias,
- const std::vector<T>& inputGateBias,
- const std::vector<T>& forgetGateBias,
- const std::vector<T>& cellBias,
- const std::vector<T>& outputGateBias,
- bool hasProjectionWeights,
- const std::vector<T>& projectionWeights,
- bool hasProjectionBias,
- const std::vector<T>& projectionBias,
- bool hasInputLayerNormWeights,
- const std::vector<T>& inputLayerNormWeights,
- bool hasForgetLayerNormWeights,
- const std::vector<T>& forgetLayerNormWeights,
- bool hasCellLayerNormWeights,
- const std::vector<T>& cellLayerNormWeights,
- bool hasOutputLayerNormWeights,
- const std::vector<T>& outputLayerNormWeights,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- tflite::ActivationFunctionType activationFunction,
- float clippingThresCell,
- float clippingThresProj)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer = CreateLstmTfLiteModel(tensorType,
- batchSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- activationFunction,
- clippingThresCell,
- clippingThresProj);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-} // anonymous namespace \ No newline at end of file
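CreateLstmTfLiteModel in the removed LstmTestHelper.hpp above appends buffers and tensors in lockstep, so the index it pushes into operatorInputs is always buffers.size() - 1 at the moment the tensor is added, while any optional LSTM input that is absent is encoded as kTfLiteOptionalTensor (-1, provided by tensorflow/lite/c/common.h). A condensed sketch of that pattern; the addOptionalInput helper is hypothetical and only illustrates the idea:

#include <cstdint>
#include <vector>
#include <tensorflow/lite/c/common.h> // provides kTfLiteOptionalTensor

// Sketch: absent optional inputs (peephole weights, projection, layer norm)
// are represented by -1 in the operator's input list; present inputs use the
// index of the tensor that was just appended.
void addOptionalInput(std::vector<int32_t>& operatorInputs, bool present, int32_t tensorIndex)
{
    operatorInputs.push_back(present ? tensorIndex : kTfLiteOptionalTensor);
}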
diff --git a/delegate/src/test/MirrorPadTest.cpp b/delegate/src/test/MirrorPadTest.cpp
deleted file mode 100644
index ca66181a30..0000000000
--- a/delegate/src/test/MirrorPadTest.cpp
+++ /dev/null
@@ -1,341 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "PadTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void MirrorPadSymmetric2dTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 3, 3 };
- std::vector<int32_t> outputShape { 7, 7 };
- std::vector<int32_t> paddingShape { 2, 2 };
-
- std::vector<float> inputValues =
- {
- 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
- 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
- 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
- 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
- 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
- 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
- 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f
- };
-
- std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
-
- PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 0, // Offset
- tflite::MirrorPadMode_SYMMETRIC);
-}
-
-void MirrorPadReflect2dTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 3, 3 };
- std::vector<int32_t> outputShape { 7, 7 };
- std::vector<int32_t> paddingShape { 2, 2 };
-
- std::vector<float> inputValues =
- {
- 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
- 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
- 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
- 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
- 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f
- };
-
- std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
-
- PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 0, // Offset
- tflite::MirrorPadMode_REFLECT);
-}
-
-void MirrorPadSymmetric3dTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 4, 4, 4 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<float> inputValues =
- {
- // Channel 0, Height (2) x Width (2)
- 1.0f, 2.0f,
- 3.0f, 4.0f,
-
- // Channel 1, Height (2) x Width (2)
- 5.0f, 6.0f,
- 7.0f, 8.0f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 1.0f, 1.0f, 2.0f, 2.0f,
- 1.0f, 1.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 4.0f, 4.0f,
- 3.0f, 3.0f, 4.0f, 4.0f,
-
- 1.0f, 1.0f, 2.0f, 2.0f,
- 1.0f, 1.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 4.0f, 4.0f,
- 3.0f, 3.0f, 4.0f, 4.0f,
-
- 5.0f, 5.0f, 6.0f, 6.0f,
- 5.0f, 5.0f, 6.0f, 6.0f,
- 7.0f, 7.0f, 8.0f, 8.0f,
- 7.0f, 7.0f, 8.0f, 8.0f,
-
- 5.0f, 5.0f, 6.0f, 6.0f,
- 5.0f, 5.0f, 6.0f, 6.0f,
- 7.0f, 7.0f, 8.0f, 8.0f,
- 7.0f, 7.0f, 8.0f, 8.0f
- };
-
- std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
-
- PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 0, // Offset
- tflite::MirrorPadMode_SYMMETRIC);
-}
-
-void MirrorPadReflect3dTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 4, 4, 4 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<float> inputValues =
- {
- // Channel 0, Height (2) x Width (2)
- 1.0f, 2.0f,
- 3.0f, 4.0f,
-
- // Channel 1, Height (2) x Width (2)
- 5.0f, 6.0f,
- 7.0f, 8.0f
- };
-
- std::vector<float> expectedOutputValues =
- {
- 8.0f, 7.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 6.0f, 5.0f,
- 8.0f, 7.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 6.0f, 5.0f,
-
- 4.0f, 3.0f, 4.0f, 3.0f,
- 2.0f, 1.0f, 2.0f, 1.0f,
- 4.0f, 3.0f, 4.0f, 3.0f,
- 2.0f, 1.0f, 2.0f, 1.0f,
-
- 8.0f, 7.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 6.0f, 5.0f,
- 8.0f, 7.0f, 8.0f, 7.0f,
- 6.0f, 5.0f, 6.0f, 5.0f,
-
- 4.0f, 3.0f, 4.0f, 3.0f,
- 2.0f, 1.0f, 2.0f, 1.0f,
- 4.0f, 3.0f, 4.0f, 3.0f,
- 2.0f, 1.0f, 2.0f, 1.0f
- };
-
- std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
-
- PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 0, // Offset
- tflite::MirrorPadMode_REFLECT);
-}
-
-void MirrorPadSymmetricUint8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 3, 3 };
- std::vector<int32_t> outputShape { 5, 7 };
- std::vector<int32_t> paddingShape { 2, 2 };
-
- std::vector<uint8_t> inputValues =
- {
- 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- };
-
- std::vector<uint8_t> expectedOutputValues =
- {
- 2, 1, 1, 2, 3, 3, 2,
- 2, 1, 1, 2, 3, 3, 2,
- 5, 4, 4, 5, 6, 6, 5,
- 8, 7, 7, 8, 9, 9, 8,
- 8, 7, 7, 8, 9, 9, 8,
- };
-
- std::vector<int32_t> paddingDim = { 1, 1, 2, 2 };
-
- PadTest<uint8_t>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 1, // Offset
- tflite::MirrorPadMode_SYMMETRIC);
-}
-
-void MirrorPadReflectInt8Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 3, 3 };
- std::vector<int32_t> outputShape { 7, 5 };
- std::vector<int32_t> paddingShape { 2, 2 };
-
- std::vector<int8_t> inputValues =
- {
- 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- };
-
- std::vector<int8_t> expectedOutputValues =
- {
- 8, 7, 8, 9, 8,
- 5, 4, 5, 6, 5,
- 2, 1, 2, 3, 2,
- 5, 4, 5, 6, 5,
- 8, 7, 8, 9, 8,
- 5, 4, 5, 6, 5,
- 2, 1, 2, 3, 2
- };
-
- std::vector<int32_t> paddingDim = { 2, 2, 1, 1 };
-
- PadTest<int8_t>(tflite::BuiltinOperator_MIRROR_PAD,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- 0, // Padding value - Not used in these tests.
- 1.0f, // Scale
- 1, // Offset
- tflite::MirrorPadMode_REFLECT);
-}
-
-TEST_SUITE("MirrorPad_CpuRefTests")
-{
-
-TEST_CASE ("MirrorPadSymmetric2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadSymmetric2dTest(backends);
-}
-
-TEST_CASE ("MirrorPadReflect2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadReflect2dTest(backends);
-}
-
-TEST_CASE ("MirrorPadSymmetric3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadSymmetric3dTest(backends);
-}
-
-TEST_CASE ("MirrorPadReflect3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadReflect3dTest(backends);
-}
-
-TEST_CASE ("MirrorPadSymmetricUint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadSymmetricUint8Test(backends);
-}
-
-TEST_CASE ("MirrorPadReflectInt8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MirrorPadReflectInt8Test(backends);
-}
-
-} // TEST_SUITE("MirrorPad_CpuRefTests")
-
-} // namespace armnnDelegate \ No newline at end of file
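The expected outputs in the removed MirrorPadTest.cpp follow directly from the two mirror modes. Padding the row 1 2 3 by two elements on each side gives

    SYMMETRIC: 2 1 | 1 2 3 | 3 2   (the mirror includes the edge element)
    REFLECT:   3 2 | 1 2 3 | 2 1   (the mirror excludes the edge element)

and the same pattern, applied to rows and columns in turn, produces the 2-D and 3-D expectations used by the tests above.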
diff --git a/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp b/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
deleted file mode 100644
index a437a08a49..0000000000
--- a/delegate/src/test/NeonDelegateTests_NDK_Issue.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NormalizationTestHelper.hpp"
-#include "SoftmaxTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-// There's a known Android NDK bug which causes this subset of Neon Tests to
-// fail. We'll exclude these tests if we're doing a debug build
-// and the NDK version is less than r21.
-// The exclusion takes place in test/CMakeLists.txt
-// https://github.com/android/ndk/issues/1135
-
-TEST_SUITE ("Softmax_CpuAccTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
- 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
-}
-
-TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput = {
- 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
- 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-}
-
-TEST_CASE ("Log_Softmax_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- std::vector<float> expectedOutput =
- {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
- -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_CpuAccTests")
-
-TEST_SUITE("L2Normalization_CpuAccTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2NormalizationTest(backends);
-}
-} // TEST_SUITE("L2Normalization_CpuAccTests")
-} \ No newline at end of file
diff --git a/delegate/src/test/NormalizationTest.cpp b/delegate/src/test/NormalizationTest.cpp
deleted file mode 100644
index e33dcf056e..0000000000
--- a/delegate/src/test/NormalizationTest.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NormalizationTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE("L2Normalization_CpuRefTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2Normalization_CpuRefTests")
-
-TEST_SUITE("L2Normalization_GpuAccTests")
-{
-
-TEST_CASE ("L2NormalizationFp32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2NormalizationTest(backends);
-}
-
-} // TEST_SUITE("L2Normalization_GpuAccTests")
-
-TEST_SUITE("LocalResponseNormalization_CpuRefTests")
-{
-
-TEST_CASE ("LocalResponseNormalizationTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
-}
-
-} // TEST_SUITE("LocalResponseNormalization_CpuRefTests")
-
-TEST_SUITE("LocalResponseNormalization_CpuAccTests")
-{
-
-TEST_CASE ("LocalResponseNormalizationTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
-}
-
-} // TEST_SUITE("LocalResponseNormalization_CpuAccTests")
-
-TEST_SUITE("LocalResponseNormalization_GpuAccTests")
-{
-
-TEST_CASE ("LocalResponseNormalizationTest_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
-}
-
-} // TEST_SUITE("LocalResponseNormalization_GpuAccTests")
-
-} // namespace armnnDelegate \ No newline at end of file
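The LocalResponseNormalizationTest calls above pass radius, bias, alpha and beta straight through to the operator. Local response normalisation is conventionally defined along the innermost (depth) axis, and the TfLite reference kernel follows the same definition:

    output(x) = input(x) / (bias + alpha * sum(input(i)^2, i in [x - radius, x + radius]))^beta

so with radius = 3 and bias = alpha = beta = 1, each value is divided by one plus the sum of squares over a seven-element window.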
diff --git a/delegate/src/test/NormalizationTestHelper.hpp b/delegate/src/test/NormalizationTestHelper.hpp
deleted file mode 100644
index 510b578c02..0000000000
--- a/delegate/src/test/NormalizationTestHelper.hpp
+++ /dev/null
@@ -1,263 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normalizationOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- int32_t radius,
- float bias,
- float alpha,
- float beta,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto inputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, outputTensor };
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::vector<int32_t> operatorInputs = { 0 };
- std::vector<int> subgraphInputs = { 0 };
-
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_L2NormOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateL2NormOptions(flatBufferBuilder,
- tflite::ActivationFunctionType_NONE).Union();
-
- if (normalizationOperatorCode == tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION)
- {
- operatorBuiltinOptionsType = BuiltinOptions_LocalResponseNormalizationOptions;
- operatorBuiltinOptions =
- CreateLocalResponseNormalizationOptions(flatBufferBuilder, radius, bias, alpha, beta).Union();
- }
-
- // create operator
- const std::vector<int32_t> operatorOutputs{ 1 };
- flatbuffers::Offset <Operator> normalizationOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphOutputs{ 1 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&normalizationOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Normalization Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- normalizationOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- const std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- int32_t radius = 0,
- float bias = 0.f,
- float alpha = 0.f,
- float beta = 0.f,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode,
- tensorType,
- inputShape,
- outputShape,
- radius,
- bias,
- alpha,
- beta,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 1, 1, 10 };
- std::vector<int32_t> outputShape { 1, 1, 1, 10 };
-
- std::vector<float> inputValues
- {
- 1.0f,
- 2.0f,
- 3.0f,
- 4.0f,
- 5.0f,
- 6.0f,
- 7.0f,
- 8.0f,
- 9.0f,
- 10.0f
- };
-
- const float approxInvL2Norm = 0.050964719f;
- std::vector<float> expectedOutputValues
- {
- 1.0f * approxInvL2Norm,
- 2.0f * approxInvL2Norm,
- 3.0f * approxInvL2Norm,
- 4.0f * approxInvL2Norm,
- 5.0f * approxInvL2Norm,
- 6.0f * approxInvL2Norm,
- 7.0f * approxInvL2Norm,
- 8.0f * approxInvL2Norm,
- 9.0f * approxInvL2Norm,
- 10.0f * approxInvL2Norm
- };
-
- NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
- int32_t radius,
- float bias,
- float alpha,
- float beta)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2, 1 };
- std::vector<int32_t> outputShape { 2, 2, 2, 1 };
-
- std::vector<float> inputValues
- {
- 1.0f, 2.0f,
- 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f
- };
-
- std::vector<float> expectedOutputValues
- {
- 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
- 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
- };
-
- NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- radius,
- bias,
- alpha,
- beta);
-}
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/PackTest.cpp b/delegate/src/test/PackTest.cpp
deleted file mode 100644
index aea903bcd0..0000000000
--- a/delegate/src/test/PackTest.cpp
+++ /dev/null
@@ -1,516 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "PackTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-template <typename T>
-void PackFp32Axis0Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 2, 3, 2, 3 };
-
- std::vector<std::vector<T>> inputValues;
- inputValues.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputValues.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> expectedOutputValues =
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18,
-
-
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- };
-
- PackTest<T>(tflite::BuiltinOperator_PACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 0);
-}
-
-template <typename T>
-void PackFp32Axis1Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
-
- std::vector<std::vector<T>> inputValues;
- inputValues.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputValues.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> expectedOutputValues =
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 19, 20, 21,
- 22, 23, 24,
-
-
- 7, 8, 9,
- 10, 11, 12,
-
- 25, 26, 27,
- 28, 29, 30,
-
-
- 13, 14, 15,
- 16, 17, 18,
-
- 31, 32, 33,
- 34, 35, 36
- };
-
- PackTest<T>(tflite::BuiltinOperator_PACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 1);
-}
-
-template <typename T>
-void PackFp32Axis2Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
-
- std::vector<std::vector<T>> inputValues;
- inputValues.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputValues.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
-    std::vector<T> expectedOutputValues =
- {
- 1, 2, 3,
- 19, 20, 21,
-
- 4, 5, 6,
- 22, 23, 24,
-
- 7, 8, 9,
- 25, 26, 27,
-
- 10, 11, 12,
- 28, 29, 30,
-
- 13, 14, 15,
- 31, 32, 33,
-
- 16, 17, 18,
- 34, 35, 36
- };
-
- PackTest<T>(tflite::BuiltinOperator_PACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 2);
-}
-
-template <typename T>
-void PackFp32Axis3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3 };
- std::vector<int32_t> expectedOutputShape { 3, 2, 3, 2 };
-
- std::vector<std::vector<T>> inputValues;
- inputValues.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputValues.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> expectedOutputValues =
- {
- 1, 19,
- 2, 20,
- 3, 21,
-
- 4, 22,
- 5, 23,
- 6, 24,
-
-
- 7, 25,
- 8, 26,
- 9, 27,
-
- 10, 28,
- 11, 29,
- 12, 30,
-
-
- 13, 31,
- 14, 32,
- 15, 33,
-
- 16, 34,
- 17, 35,
- 18, 36
- };
-
- PackTest<T>(tflite::BuiltinOperator_PACK,
-                tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 3);
-}
-
-template <typename T>
-void PackFp32Inputs3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 3 };
- std::vector<int32_t> expectedOutputShape { 3, 3, 3 };
-
- std::vector<std::vector<T>> inputValues;
- inputValues.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- });
-
- inputValues.push_back(
- {
- 10, 11, 12,
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputValues.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
- 25, 26, 27
- });
-
- std::vector<T> expectedOutputValues =
- {
- 1, 2, 3,
- 10, 11, 12,
- 19, 20, 21,
-
- 4, 5, 6,
- 13, 14, 15,
- 22, 23, 24,
-
- 7, 8, 9,
- 16, 17, 18,
- 25, 26, 27
- };
-
- PackTest<T>(tflite::BuiltinOperator_PACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 1);
-}
-
-TEST_SUITE("Pack_CpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis2_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Pack_Uint8_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-// Int8
-TEST_CASE ("Pack_Int8_Axis0_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-TEST_CASE ("Pack_Int8_Inputs3_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-}
-
-TEST_SUITE("Pack_GpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis2_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Pack_Uint8_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-// Int8
-TEST_CASE ("Pack_Int8_Axis0_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-TEST_CASE ("Pack_Int8_Inputs3_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-}
-
-TEST_SUITE("Pack_CpuRefTests")
-{
-
-// Fp32
-TEST_CASE ("Pack_Fp32_Axis0_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis1_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis2_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Axis3_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Pack_Fp32_Inputs3_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Pack_Uint8_Axis0_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Pack_Uint8_Inputs3_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-// Int8
-TEST_CASE ("Pack_Int8_Axis0_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-TEST_CASE ("Pack_Int8_Inputs3_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
-}
-
-}
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/PackTestHelper.hpp b/delegate/src/test/PackTestHelper.hpp
deleted file mode 100644
index a9e2ee17bc..0000000000
--- a/delegate/src/test/PackTestHelper.hpp
+++ /dev/null
@@ -1,186 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const int32_t inputTensorNum,
- unsigned int axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::vector<int32_t> operatorInputs{};
- const std::vector<int32_t> operatorOutputs{inputTensorNum};
- std::vector<int> subgraphInputs{};
- const std::vector<int> subgraphOutputs{inputTensorNum};
-
- std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
- for (int i = 0; i < inputTensorNum; ++i)
- {
- tensors[i] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input" + std::to_string(i)),
- quantizationParameters);
-
- // Add number of inputs to vector.
- operatorInputs.push_back(i);
- subgraphInputs.push_back(i);
- }
-
- // Create output tensor
- tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_PackOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreatePackOptions(flatBufferBuilder, inputTensorNum, axis).Union();
-
- flatbuffers::Offset <Operator> packOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&packOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Pack Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, packOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void PackTest(tflite::BuiltinOperator packOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<std::vector<T>>& inputValues,
- std::vector<T>& expectedOutputValues,
- unsigned int axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreatePackTfLiteModel(packOperatorCode,
- tensorType,
- inputShape,
- expectedOutputShape,
- inputValues.size(),
- axis,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for all input tensors.
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- // Get single input tensor and assign to interpreters.
- auto inputTensorValues = inputValues[i];
- armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
- }
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/PadTest.cpp b/delegate/src/test/PadTest.cpp
deleted file mode 100644
index 4721b685cc..0000000000
--- a/delegate/src/test/PadTest.cpp
+++ /dev/null
@@ -1,606 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "PadTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void Pad2dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
- float pad = 0.0f)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 3, 5, 6 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<float> inputValues = { 0.0f, 4.0f,
- 2.0f, -5.0f,
- 6.0f, 1.0f,
- 5.0f, -2.0f };
-
- std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, 0.0f, 4.0f, pad, pad,
- pad, pad, 2.0f, -5.0f, pad, pad,
- pad, pad, pad, pad, pad, pad,
-
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, 6.0f, 1.0f, pad, pad,
- pad, pad, 5.0f, -2.0f, pad, pad,
- pad, pad, pad, pad, pad, pad,
-
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad };
-
- std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
-
- PadTest<float>(padOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- pad);
-}
-
-void Pad3dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
- float pad = 0.0f)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 3, 5, 6 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<float> inputValues = { 0.0f, 4.0f,
- 2.0f, 5.0f,
- 6.0f, 1.0f,
- 5.0f, 2.0f };
-
- std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, 0.0f, 4.0f, pad, pad,
- pad, pad, 2.0f, 5.0f, pad, pad,
- pad, pad, pad, pad, pad, pad,
-
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, 6.0f, 1.0f, pad, pad,
- pad, pad, 5.0f, 2.0f, pad, pad,
- pad, pad, pad, pad, pad, pad,
-
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad,
- pad, pad, pad, pad, pad, pad };
-
- std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
-
- PadTest<float>(padOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- pad);
-}
-
-void Pad4dTest(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
- float pad = 0.0f)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 3, 2 };
- std::vector<int32_t> outputShape { 4, 5, 7, 4 };
- std::vector<int32_t> paddingShape { 4, 2 };
-
- std::vector<float> inputValues = { 0.0f, 1.0f,
- 2.0f, 3.0f,
- 4.0f, 5.0f,
-
- 6.0f, 7.0f,
- 8.0f, 9.0f,
- 10.0f, 11.0f,
-
- 12.0f, 13.0f,
- 14.0f, 15.0f,
- 16.0f, 17.0f,
-
- 18.0f, 19.0f,
- 20.0f, 21.0f,
- 22.0f, 23.0f };
-
- std::vector<float> expectedOutputValues = { pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, 0.0f, 1.0f, pad,
- pad, 2.0f, 3.0f, pad,
- pad, 4.0f, 5.0f, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, 6.0f, 7.0f, pad,
- pad, 8.0f, 9.0f, pad,
- pad, 10.0f, 11.0f, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, 12.0f, 13.0f, pad,
- pad, 14.0f, 15.0f, pad,
- pad, 16.0f, 17.0f, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, 18.0f, 19.0f, pad,
- pad, 20.0f, 21.0f, pad,
- pad, 22.0f, 23.0f, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
-
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad,
- pad, pad, pad, pad };
-
- std::vector<int32_t> paddingDim = { 1, 1, 2, 1, 3, 1, 1, 1 };
-
- PadTest<float>(padOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- pad);
-}
-
-void PadInt8Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
- int8_t paddingValue = 0,
- int8_t p = 3,
- float quantizationScale = -2.0f,
- int32_t quantizationOffset = 3)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 3, 5, 6 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<int8_t> inputValues = { 0, 4,
- 2, -5,
- 6, 1,
- 5, -2 };
-
- std::vector<int8_t> expectedOutputValues = { p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, 0, 4, p, p,
- p, p, 2, -5, p, p,
- p, p, p, p, p, p,
-
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, 6, 1, p, p,
- p, p, 5, -2, p, p,
- p, p, p, p, p, p,
-
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p };
-
- std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
-
- PadTest<int8_t>(padOperatorCode,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- paddingValue,
- quantizationScale,
- quantizationOffset);
-}
-
-void PadUint8Test(std::vector<armnn::BackendId>& backends,
- tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
- uint8_t paddingValue = 0,
- uint8_t p = 3,
- float quantizationScale = -2.0f,
- int32_t quantizationOffset = 3)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 2, 2 };
- std::vector<int32_t> outputShape { 3, 5, 6 };
- std::vector<int32_t> paddingShape { 3, 2 };
-
- std::vector<uint8_t> inputValues = { 0, 4,
- 2, 5,
- 6, 1,
- 5, 2 };
-
- std::vector<uint8_t> expectedOutputValues = { p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, 0, 4, p, p,
- p, p, 2, 5, p, p,
- p, p, p, p, p, p,
-
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, 6, 1, p, p,
- p, p, 5, 2, p, p,
- p, p, p, p, p, p,
-
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p,
- p, p, p, p, p, p };
-
- std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
-
- PadTest<uint8_t>(padOperatorCode,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- paddingShape,
- outputShape,
- inputValues,
- paddingDim,
- expectedOutputValues,
- paddingValue,
- quantizationScale,
- quantizationOffset);
-}
-
-TEST_SUITE("Pad_CpuRefTests")
-{
-
-TEST_CASE ("Pad2d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad2dTest(backends);
-}
-
-TEST_CASE ("Pad3d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad3dTest(backends);
-}
-
-TEST_CASE ("Pad4d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad4dTest(backends);
-}
-
-TEST_CASE ("Pad_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadInt8Test(backends);
-}
-
-TEST_CASE ("Pad_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadUint8Test(backends);
-}
-
-TEST_CASE ("PadV22d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
-}
-
-TEST_CASE ("PadV23d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
-}
-
-TEST_CASE ("PadV24d_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
-}
-
-TEST_CASE ("PadV2_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-TEST_CASE ("PadV2_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-} // TEST_SUITE("Pad_CpuRefTests")
-
-TEST_SUITE("Pad_CpuAccTests")
-{
-
-TEST_CASE ("Pad2d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad2dTest(backends);
-}
-
-TEST_CASE ("Pad3d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad3dTest(backends);
-}
-
-TEST_CASE ("Pad4d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad4dTest(backends);
-}
-
-TEST_CASE ("Pad_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadInt8Test(backends);
-}
-
-TEST_CASE ("Pad_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadUint8Test(backends);
-}
-
-TEST_CASE ("PadV22d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
-}
-
-TEST_CASE ("PadV23d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
-}
-
-TEST_CASE ("PadV24d_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
-}
-
-TEST_CASE ("PadV2_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-TEST_CASE ("PadV2_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-} // TEST_SUITE("Pad_CpuAccTests")
-
-TEST_SUITE("Pad_GpuAccTests")
-{
-
-TEST_CASE ("Pad2d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad2dTest(backends);
-}
-
-TEST_CASE ("Pad3d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad3dTest(backends);
-}
-
-TEST_CASE ("Pad4d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad4dTest(backends);
-}
-
-TEST_CASE ("Pad_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadInt8Test(backends);
-}
-
-TEST_CASE ("Pad_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadUint8Test(backends);
-}
-
-TEST_CASE ("PadV22d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
-}
-
-TEST_CASE ("PadV23d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
-}
-
-TEST_CASE ("PadV24d_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
-}
-
-TEST_CASE ("PadV2_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-TEST_CASE ("PadV2_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
-}
-
-} // TEST_SUITE("Pad_GpuAccTests")
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
deleted file mode 100644
index e96bc4bfe3..0000000000
--- a/delegate/src/test/PadTestHelper.hpp
+++ /dev/null
@@ -1,224 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-template <typename T>
-std::vector<char> CreatePadTfLiteModel(
- tflite::BuiltinOperator padOperatorCode,
- tflite::TensorType tensorType,
- tflite::MirrorPadMode paddingMode,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& paddingTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const std::vector<int32_t>& paddingDim,
- const std::vector<T> paddingValue,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto inputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- auto paddingTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
- paddingTensorShape.size()),
- tflite::TensorType_INT32,
- 1,
- flatBufferBuilder.CreateString("padding"));
-
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
- sizeof(int32_t) * paddingDim.size())));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::vector<int32_t> operatorInputs;
- std::vector<int> subgraphInputs;
-
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions;
-
- if (padOperatorCode == tflite::BuiltinOperator_PAD)
- {
- operatorInputs = {{ 0, 1 }};
- subgraphInputs = {{ 0, 1 }};
- operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
- }
- else if(padOperatorCode == tflite::BuiltinOperator_MIRROR_PAD)
- {
- operatorInputs = {{ 0, 1 }};
- subgraphInputs = {{ 0, 1 }};
-
- operatorBuiltinOptionsType = BuiltinOptions_MirrorPadOptions;
- operatorBuiltinOptions = CreateMirrorPadOptions(flatBufferBuilder, paddingMode).Union();
- }
- else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
- sizeof(T))));
-
- const std::vector<int32_t> shape = { 1 };
- auto padValueTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(shape.data(),
- shape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("paddingValue"),
- quantizationParameters);
-
- tensors.push_back(padValueTensor);
-
- operatorInputs = {{ 0, 1, 3 }};
- subgraphInputs = {{ 0, 1, 3 }};
-
- operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
- operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
- }
-
- // create operator
- const std::vector<int32_t> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> paddingOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&paddingOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- padOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void PadTest(tflite::BuiltinOperator padOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- const std::vector<int32_t>& inputShape,
- const std::vector<int32_t>& paddingShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<int32_t>& paddingDim,
- std::vector<T>& expectedOutputValues,
- T paddingValue,
- float quantScale = 1.0f,
- int quantOffset = 0,
- tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
- tensorType,
- paddingMode,
- inputShape,
- paddingShape,
- outputShape,
- paddingDim,
- {paddingValue},
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/Pooling2dTest.cpp b/delegate/src/test/Pooling2dTest.cpp
deleted file mode 100644
index fd52aee70d..0000000000
--- a/delegate/src/test/Pooling2dTest.cpp
+++ /dev/null
@@ -1,1275 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Pooling2dTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 1, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 12.0f, 7.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 2,
- 2,
- 2,
- 2);
-}
-
-void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 1, 2, 1 };
-
- std::vector<int8_t > inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 12, 7 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 12.0f, 7.0f, 3.0f, -1.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2);
-}
-
-void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int8_t> inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 12, 7, 3, -1 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
- -8.0f, -12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 7.0f, 3.0f, 0.0f, 2.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU);
-}
-
-void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<int8_t> inputValues = { -5, -8, -10, 7,
- -8, -12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 1, 1, 7, 3, 1, 2 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.0f,
- 1);
-}
-
-void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
- -8.0f, -12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 1,
- 1,
- ::tflite::ActivationFunctionType_RELU6);
-}
-
-void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int8_t> inputValues = { -5, -8, -10, 7,
- -8, -12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 1,
- 1,
- ::tflite::ActivationFunctionType_RELU6,
- 2.0f,
- 1);
-}
-
-void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
- 8, 12, 15, 2,
- 3, 4, 1, 11 };
-
- std::vector<uint8_t> expectedOutputValues = { 12, 15, 4, 11 };
-
- Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
- 8, 5, 7, 2,
- 3, 4, 1, 11 };
-
- std::vector<uint8_t> expectedOutputValues = { 12, 10, 15, 8, 7, 11 };
-
- Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.0f,
- 1);
-}
-
-void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int16_t> inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int16_t> expectedOutputValues = { 12, 7, 3, -1 };
-
- Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 0);
-}
-
-void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<int16_t> inputValues = { -5, -8, -10, 7,
- -8, -12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int16_t> expectedOutputValues = { 0, 0, 7, 3, 0, 2 };
-
- Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.0f,
- 0);
-}
-
-void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 1, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 5.75f, -4.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 2,
- 2,
- 2,
- 2);
-}
-
-void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 1, 2, 1 };
-
- std::vector<int8_t > inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 6, -4 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 5.75f, -4.0f, -0.5f, -6.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2);
-}
-
-void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int8_t > inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { 6, -4, -1, -6 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- -8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, 11.0f };
-
- std::vector<float> expectedOutputValues = { 1.75f, 0.0f, 0.0f, 0.75f, 0.0f, 0.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU);
-}
-
-void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<int8_t> inputValues = { -5, 8, -10, 7,
- -8, 12, -15, 2,
- 3, -4, -1, 11 };
-
- std::vector<int8_t> expectedOutputValues = { 2, 1, 1, 1, 1, 1 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.5f,
- 1);
-}
-
-void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- -8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, 11.0f };
-
- std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 1,
- 1,
- ::tflite::ActivationFunctionType_RELU6);
-}
-
-void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int8_t> inputValues = { -5, 8, -10, 7,
- -8, 12, -15, 2,
- 3, -4, -1, 11 };
-
- std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
-
- Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 1,
- 1,
- ::tflite::ActivationFunctionType_RELU6,
- 2.5f,
- 1);
-}
-
-void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
- 8, 12, 15, 2,
- 3, 4, 1, 11 };
-
- std::vector<uint8_t> expectedOutputValues = { 8, 9, 4, 6 };
-
- Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 1);
-}
-
-void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
- 8, 5, 7, 2,
- 3, 4, 1, 11 };
-
- std::vector<uint8_t> expectedOutputValues = { 8, 8, 9, 5, 4, 5 };
-
- Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.0f,
- 1);
-}
-
-void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<int16_t > inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int16_t> expectedOutputValues = { 6, -4, -1, -6 };
-
- Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2,
- tflite::ActivationFunctionType_NONE,
- 2.5f,
- 0);
-}
-
-void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<int16_t> inputValues = { -5, 8, -10, 7,
- -8, 12, -15, 2,
- 3, -4, -1, 11 };
-
- std::vector<int16_t> expectedOutputValues = { 2, 0, 0, 1, 0, 0 };
-
- Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU,
- 2.5f,
- 0);
-}
-
-void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 1, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 2,
- 2,
- 2,
- 2);
-}
-
-void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f, 3.535534f, 7.81025f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 2,
- 2);
-}
-
-void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- -8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, 11.0f };
-
- std::vector<float> expectedOutputValues = { 8.616844f, 11.543396f, 9.721111f, 7.632169f, 9.8234415f, 9.367497f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_VALID,
- 1,
- 1,
- 2,
- 2,
- ::tflite::ActivationFunctionType_RELU);
-}
-
-void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- -8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, 11.0f };
-
- std::vector<float> expectedOutputValues = { 5.0f, 6.0f, 3.0f, 1.0f };
-
- Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- ::tflite::Padding_SAME,
- 2,
- 2,
- 1,
- 1,
- ::tflite::ActivationFunctionType_RELU6);
-}
-
-TEST_SUITE("Pooling2d_GpuAccTests")
-{
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- L2Pool2dFP32Relu6Test(backends);
-}
-
-} // TEST_SUITE("Pooling2d_GpuAccTests")
-
-TEST_SUITE("Pooling2d_CpuAccTests")
-{
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- L2Pool2dFP32Relu6Test(backends);
-}
-
-} // TEST_SUITE("Pooling2d_CpuAccTests")
-
-TEST_SUITE("Pooling2d_CpuRefTests")
-{
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int16_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt16PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling2d_Int16_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool2dInt16ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dFP32Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt8Relu6Test(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dUint8PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dUint8ReluTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int16_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt16PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling2d_Int16_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool2dInt16ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32ReluTest(backends);
-}
-
-TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- L2Pool2dFP32Relu6Test(backends);
-}
-
-} // TEST_SUITE("Pooling2d_CpuRefTests")
-
-} // namespace armnnDelegate \ No newline at end of file
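The expected output shapes used throughout the Pooling2d tests above follow the usual TensorFlow Lite padding arithmetic. As a reading aid only (this helper is not part of the deleted sources), the pooled extent of one spatial dimension can be derived as:

    // Sketch: pooled output extent for one spatial dimension under TFLite padding rules.
    // inSize, filterSize and stride are assumed positive, with filterSize <= inSize for VALID.
    int PooledOutputSize(int inSize, int filterSize, int stride, bool paddingSame)
    {
        if (paddingSame)
        {
            return (inSize + stride - 1) / stride;      // SAME: ceil(inSize / stride)
        }
        return (inSize - filterSize) / stride + 1;      // VALID: no padding
    }

For the 1x3x4x1 inputs above with a 2x2 filter and stride 2, this gives a 2x2 output for Padding_SAME and a 1x2 output for Padding_VALID, matching the outputShape vectors in the tests.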
diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp
deleted file mode 100644
index c7457dbb22..0000000000
--- a/delegate/src/test/Pooling2dTestHelper.hpp
+++ /dev/null
@@ -1,196 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreatePooling2dTfLiteModel(
- tflite::BuiltinOperator poolingOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- tflite::Padding padding = tflite::Padding_SAME,
- int32_t strideWidth = 0,
- int32_t strideHeight = 0,
- int32_t filterWidth = 0,
- int32_t filterHeight = 0,
- tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- flatbuffers::Offset<tflite::Buffer> buffers[3] = {CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder)};
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- flatbuffers::Offset<Tensor> tensors[2] {
- CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters),
-
- CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters)
- };
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder,
- padding,
- strideWidth,
- strideHeight,
- filterWidth,
- filterHeight,
- fusedActivation).Union();
-
- const std::vector<int32_t> operatorInputs{0};
- const std::vector<int32_t> operatorOutputs{1};
- flatbuffers::Offset <Operator> poolingOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const int subgraphInputs[1] = {0};
- const int subgraphOutputs[1] = {1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors, 2),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs, 1),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs, 1),
- flatBufferBuilder.CreateVector(&poolingOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers, 3));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- tflite::Padding padding = tflite::Padding_SAME,
- int32_t strideWidth = 0,
- int32_t strideHeight = 0,
- int32_t filterWidth = 0,
- int32_t filterHeight = 0,
- tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
- tensorType,
- inputShape,
- outputShape,
- padding,
- strideWidth,
- strideHeight,
- filterWidth,
- filterHeight,
- fusedActivation,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-} // anonymous namespace
-
-
-
-
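Because CreatePooling2dTfLiteModel applies the same quantScale and quantOffset to both the input and the output tensor, the expected values in the quantised cases above can be read directly in the raw integer domain. Assuming the reference kernels average the raw values and round to nearest, for example:

    AveragePool2dInt8PaddingValidTest:  (-5 + 8 + 8 + 12) / 4 = 5.75  -> 6
                                        (-10 + 7 - 15 + 2) / 4 = -4.0 -> -4

which matches the expectedOutputValues of { 6, -4 }.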
diff --git a/delegate/src/test/Pooling3dTest.cpp b/delegate/src/test/Pooling3dTest.cpp
deleted file mode 100644
index c0a186210e..0000000000
--- a/delegate/src/test/Pooling3dTest.cpp
+++ /dev/null
@@ -1,431 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Pooling3dTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// Pool3D custom op was only added in tflite r2.6.
-#if defined(ARMNN_POST_TFLITE_2_5)
-
-void MaxPool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input and expected output data
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 1, 2, 3, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 6, 6, 4 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kMax";
- TfLitePadding padding = kTfLitePaddingValid;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2);
-}
-
-void MaxPool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 2, 3, 4, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 6, 6, 4, 4, 6, 6, 6, 6, 4, 5, 6, 6, 6, 6, 4, 4 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kMax";
- TfLitePadding padding = kTfLitePaddingSame;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2);
-}
-
-void MaxPool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 1, 3, 3, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 2, 3 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kMax";
- TfLitePadding padding = kTfLitePaddingValid;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 1,
- 2);
-}
-
-void MaxPool3dFP32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 2, 3, 4, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 6, 6 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kMax";
- TfLitePadding padding = kTfLitePaddingUnknown;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2);
-}
-
-void AveragePool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data.
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 1, 2, 3, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 3.5, 3, 2.5 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kAverage";
- TfLitePadding padding = kTfLitePaddingValid;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2);
-}
-
-void AveragePool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 4, 2, 3, 1, 1 };
- std::vector<int32_t> outputShape = { 4, 2, 3, 1, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 3, 4, 4.5, 4.5, 5.5, 6, 3, 4, 4.5, 4.5, 5.5, 6, 3, 4, 4.5, 4.5 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kAverage";
- TfLitePadding padding = kTfLitePaddingSame;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2);
-}
-
-void AveragePool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
- std::vector<int32_t> outputShape = { 1, 1, 2, 2, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 1.5, 3.5 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kAverage";
- TfLitePadding padding = kTfLitePaddingUnknown;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 2,
- 2,
- 2,
- 2,
- 1,
- 2);
-}
-
-void AveragePool3dFP32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data and expected output data
- std::vector<int32_t> inputShape = { 4, 3, 2, 1, 1 };
- std::vector<int32_t> outputShape = { 1, 2, 2, 4, 1 };
-
- std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6,
- 1, 2, 3, 4, 5, 6 };
- std::vector<float> expectedOutputValues = { 3.125, 4.25 };
-
- // poolType string required to create the correct pooling operator
- // Padding type required to create the padding in custom options
- std::string poolType = "kMax";
- TfLitePadding padding = kTfLitePaddingUnknown;
-
- Pooling3dTest<float>(poolType,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- padding,
- 2,
- 2,
- 2,
- 2,
- 2,
- 2);
-}
-
-TEST_SUITE("Pooling3d_GpuAccTests")
-{
-
-TEST_CASE ("MaxPooling3d_FP32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32Test(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_H1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- MaxPool3dFP32H1Test(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_H1_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- AveragePool3dFP32H1Test(backends);
-}
-
-} // TEST_SUITE("Pooling3d_GpuAccTests")
-
-TEST_SUITE("Pooling3d_CpuAccTests")
-{
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32Test(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_H1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- MaxPool3dFP32H1Test(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_H1_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- AveragePool3dFP32H1Test(backends);
-}
-
-} // TEST_SUITE("Pooling3d_CpuAccTests")
-
-TEST_SUITE("Pooling3d_CpuRefTests")
-{
-TEST_CASE ("MaxPooling3d_FP32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32Test(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("MaxPooling3d_FP32_H1_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- MaxPool3dFP32H1Test(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32PaddingValidTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32PaddingSameTest(backends);
-}
-
-TEST_CASE ("AveragePooling3d_FP32_H1_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- AveragePool3dFP32H1Test(backends);
-}
-
-} // TEST_SUITE("Pooling3d_CpuRefTests")
-
-#endif
-
-} \ No newline at end of file
diff --git a/delegate/src/test/Pooling3dTestHelper.hpp b/delegate/src/test/Pooling3dTestHelper.hpp
deleted file mode 100644
index 47e00f7b7f..0000000000
--- a/delegate/src/test/Pooling3dTestHelper.hpp
+++ /dev/null
@@ -1,298 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <flatbuffers/flexbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/custom_ops_register.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-#if defined(ARMNN_POST_TFLITE_2_5)
-
-std::vector<uint8_t> CreateCustomOptions(int, int, int, int, int, int, TfLitePadding);
-
-std::vector<char> CreatePooling3dTfLiteModel(
- std::string poolType,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- TfLitePadding padding = kTfLitePaddingSame,
- int32_t strideWidth = 0,
- int32_t strideHeight = 0,
- int32_t strideDepth = 0,
- int32_t filterWidth = 0,
- int32_t filterHeight = 0,
- int32_t filterDepth = 0,
- tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- // Create the input and output tensors
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // Create the custom options from the function below
- std::vector<uint8_t> customOperatorOptions = CreateCustomOptions(strideHeight, strideWidth, strideDepth,
- filterHeight, filterWidth, filterDepth, padding);
- // opCodeIndex is created as a uint8_t to avoid map lookup
- uint8_t opCodeIndex = 0;
- // Set the operator name based on the PoolType passed in from the test case
- std::string opName = "";
- if (poolType == "kMax")
- {
- opName = "MaxPool3D";
- }
- else
- {
- opName = "AveragePool3D";
- }
- // To create a custom operator code you pass in the builtin code for custom operators and the name of the custom op
- flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCodeDirect(flatBufferBuilder,
- tflite::BuiltinOperator_CUSTOM,
- opName.c_str());
-
- // Create the Operator using the opCodeIndex and custom options. Also sets builtin options to none.
- const std::vector<int32_t> operatorInputs{ 0 };
- const std::vector<int32_t> operatorOutputs{ 1 };
- flatbuffers::Offset<Operator> poolingOperator =
- CreateOperator(flatBufferBuilder,
- opCodeIndex,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- tflite::BuiltinOptions_NONE,
- 0,
- flatBufferBuilder.CreateVector<uint8_t>(customOperatorOptions),
- tflite::CustomOptionsFormat_FLEXBUFFERS);
-
- // Create the subgraph using the operator created above.
- const std::vector<int> subgraphInputs{ 0 };
- const std::vector<int> subgraphOutputs{ 1 };
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&poolingOperator, 1));
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Pooling3d Operator Model");
-
- // Create the model using operatorCode and the subgraph.
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T>
-void Pooling3dTest(std::string poolType,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- TfLitePadding padding = kTfLitePaddingSame,
- int32_t strideWidth = 0,
- int32_t strideHeight = 0,
- int32_t strideDepth = 0,
- int32_t filterWidth = 0,
- int32_t filterHeight = 0,
- int32_t filterDepth = 0,
- tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- // Create the single op model buffer
- std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
- tensorType,
- inputShape,
- outputShape,
- padding,
- strideWidth,
- strideHeight,
- strideDepth,
- filterWidth,
- filterHeight,
- filterDepth,
- fusedActivation,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
- // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
- // Based on the poolType from the test case add the custom operator using the name and the tflite
- // registration function
- tflite::ops::builtin::BuiltinOpResolver armnn_op_resolver;
- if (poolType == "kMax")
- {
- armnn_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
- }
- else
- {
- armnn_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
- }
-
- CHECK(InterpreterBuilder(tfLiteModel, armnn_op_resolver)
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
-
- // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
- // Based on the poolType from the test case add the custom operator using the name and the tflite
- // registration function
- tflite::ops::builtin::BuiltinOpResolver tflite_op_resolver;
- if (poolType == "kMax")
- {
- tflite_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
- }
- else
- {
- tflite_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
- }
-
- CHECK(InterpreterBuilder(tfLiteModel, tflite_op_resolver)
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-// Function to create the flexbuffer custom options for the custom pooling3d operator.
-std::vector<uint8_t> CreateCustomOptions(int strideHeight, int strideWidth, int strideDepth,
- int filterHeight, int filterWidth, int filterDepth, TfLitePadding padding)
-{
- auto flex_builder = std::make_unique<flexbuffers::Builder>();
- size_t map_start = flex_builder->StartMap();
- flex_builder->String("data_format", "NDHWC");
- // Padding is created as a key and padding type. Only VALID and SAME supported
- if (padding == kTfLitePaddingValid)
- {
- flex_builder->String("padding", "VALID");
- }
- else
- {
- flex_builder->String("padding", "SAME");
- }
-
- // Vector of filter dimensions in order ( 1, Depth, Height, Width, 1 )
- auto start = flex_builder->StartVector("ksize");
- flex_builder->Add(1);
- flex_builder->Add(filterDepth);
- flex_builder->Add(filterHeight);
- flex_builder->Add(filterWidth);
- flex_builder->Add(1);
- // EndVector( start, bool typed, bool fixed)
- flex_builder->EndVector(start, true, false);
-
- // Vector of stride dimensions in order ( 1, Depth, Height, Width, 1 )
- auto stridesStart = flex_builder->StartVector("strides");
- flex_builder->Add(1);
- flex_builder->Add(strideDepth);
- flex_builder->Add(strideHeight);
- flex_builder->Add(strideWidth);
- flex_builder->Add(1);
- // EndVector( stridesStart, bool typed, bool fixed)
- flex_builder->EndVector(stridesStart, true, false);
-
- flex_builder->EndMap(map_start);
- flex_builder->Finish();
-
- return flex_builder->GetBuffer();
-}
-#endif
-} // anonymous namespace
-
-
-
-
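For reference, the FlexBuffer map built by CreateCustomOptions above stores the padding as a string plus two typed vectors, "ksize" and "strides", both in { 1, Depth, Height, Width, 1 } order. A minimal sketch of decoding such a buffer with the flexbuffers API (the struct and function names here are illustrative, not taken from the deleted helper):

    #include <flatbuffers/flexbuffers.h>

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Pool3dOptions
    {
        std::string padding;
        int filterDepth;
        int filterHeight;
        int filterWidth;
    };

    // Sketch: read back the custom options written by a CreateCustomOptions-style builder.
    Pool3dOptions ReadPool3dOptions(const std::vector<uint8_t>& customOptions)
    {
        auto map = flexbuffers::GetRoot(customOptions).AsMap();

        Pool3dOptions options;
        options.padding      = map["padding"].AsString().str();   // "SAME" or "VALID"

        auto ksize           = map["ksize"].AsTypedVector();      // { 1, depth, height, width, 1 }
        options.filterDepth  = ksize[1].AsInt32();
        options.filterHeight = ksize[2].AsInt32();
        options.filterWidth  = ksize[3].AsInt32();
        // "strides" follows the same { 1, depth, height, width, 1 } convention.

        return options;
    }

The read side mirrors the write side above: the vectors are typed but not fixed-size (EndVector is called with typed = true, fixed = false), and indices 0 and 4 are always 1 for the batch and channel dimensions.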
diff --git a/delegate/src/test/PreluTest.cpp b/delegate/src/test/PreluTest.cpp
deleted file mode 100644
index d9e08d20ca..0000000000
--- a/delegate/src/test/PreluTest.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "PreluTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate {
-
-void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false) {
- std::vector<int32_t> inputShape { 1, 2, 3 };
- std::vector<int32_t> alphaShape { 1 };
- std::vector<int32_t> outputShape { 1, 2, 3 };
-
- if (isDynamicOutput)
- {
- outputShape.clear();
- }
-
- std::vector<float> inputData = { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f };
- std::vector<float> alphaData = { 0.5f };
- std::vector<float> expectedOutput = { -7.f, 2.f, 0.f, 1.f, -2.5f, 14.f };
-
- PreluTest(tflite::BuiltinOperator_PRELU,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- alphaShape,
- outputShape,
- inputData,
- alphaData,
- expectedOutput,
- isAlphaConst);
-}
-
-TEST_SUITE("Prelu_CpuRefTests")
-{
-
-TEST_CASE ("PreluFp32SimpleConstTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, true);
-}
-
-TEST_CASE ("PreluFp32SimpleTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, false);
-}
-
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, true, true);
-}
-
-TEST_CASE ("PreluFp32SimpleDynamicTest_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- PreluFloatSimpleTest(backends, false, true);
-}
-
-} // TEST_SUITE("Prelu_CpuRefTests")
-
-TEST_SUITE("Prelu_CpuAccTests")
-{
-
-TEST_CASE ("PreluFp32SimpleConstTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, true);
-}
-
-TEST_CASE ("PreluFp32SimpleTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, false);
-}
-
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, true, true);
-}
-
-TEST_CASE ("PreluFp32SimpleDynamicTest_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- PreluFloatSimpleTest(backends, false, true);
-}
-
-} // TEST_SUITE("Prelu_CpuAccTests")
-
-TEST_SUITE("Prelu_GpuAccTests")
-{
-
-TEST_CASE ("PreluFp32SimpleConstTest_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, true);
-}
-
-TEST_CASE ("PreluFp32SimpleTest_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, false);
-}
-
-TEST_CASE ("PreluFp32SimpleConstDynamicTest_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, true, true);
-}
-
-TEST_CASE ("PreluFp32SimpleDynamicTest_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- PreluFloatSimpleTest(backends, false, true);
-}
-
-} // TEST_SUITE("Prelu_GpuAccTests")
-
-} \ No newline at end of file
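The expected outputs in PreluFloatSimpleTest above follow directly from the PReLU definition, f(x) = x for x >= 0 and f(x) = alpha * x otherwise. A minimal sketch of the element-wise function being exercised (illustrative only):

    // PReLU as exercised by PreluFloatSimpleTest; negative inputs are scaled by alpha.
    float Prelu(float x, float alpha)
    {
        return x >= 0.0f ? x : alpha * x;
    }

With alpha = 0.5f this gives Prelu(-14.f, 0.5f) == -7.f and Prelu(-5.f, 0.5f) == -2.5f, while non-negative inputs pass through unchanged, matching the expectedOutput vector above.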
diff --git a/delegate/src/test/PreluTestHelper.hpp b/delegate/src/test/PreluTestHelper.hpp
deleted file mode 100644
index b50c37763f..0000000000
--- a/delegate/src/test/PreluTestHelper.hpp
+++ /dev/null
@@ -1,195 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputShape,
- const std::vector<int32_t>& alphaShape,
- const std::vector<int32_t>& outputShape,
- std::vector<float>& alphaData,
- bool alphaIsConstant)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t *>(alphaData.data()), sizeof(float) * alphaData.size())));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ 1.0f }),
- flatBufferBuilder.CreateVector<int64_t>({ 0 }));
-
- auto inputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
- inputShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- auto alphaTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
- alphaShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("alpha"),
- quantizationParameters);
-
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
- outputShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, alphaTensor, outputTensor };
-
- const std::vector<int> operatorInputs{0, 1};
- const std::vector<int> operatorOutputs{2};
- flatbuffers::Offset <Operator> preluOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
-
- std::vector<int> subgraphInputs{0};
- if (!alphaIsConstant)
- {
- subgraphInputs.push_back(1);
- }
-
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&preluOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Prelu Operator Model");
- flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, preluOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&opCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
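-// Builds the PRELU model, runs it on a plain TfLite interpreter and on an interpreter
-// using the ArmNN delegate, and compares both outputs element-wise against the
-// expected values. The alpha tensor is either a constant or a second graph input.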
-void PreluTest(tflite::BuiltinOperator preluOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- const std::vector<int32_t>& inputShape,
- const std::vector<int32_t>& alphaShape,
- std::vector<int32_t>& outputShape,
- std::vector<float>& inputData,
- std::vector<float>& alphaData,
- std::vector<float>& expectedOutput,
- bool alphaIsConstant)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
- tensorType,
- inputShape,
- alphaShape,
- outputShape,
- alphaData,
- alphaIsConstant);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- CHECK(tfLiteModel != nullptr);
-
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
-
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);
-
- // Set alpha data if not constant
- if (!alphaIsConstant)
- {
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < expectedOutput.size(); i++)
- {
- CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
- CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
- }
-}
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/QuantizationTest.cpp b/delegate/src/test/QuantizationTest.cpp
deleted file mode 100644
index fbc2903d38..0000000000
--- a/delegate/src/test/QuantizationTest.cpp
+++ /dev/null
@@ -1,455 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "QuantizationTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// Dequantize operator test functions.
-void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- // Set input and output data
- std::vector<uint8_t> inputValues
- {
- 0, 1, 2, 3, // Lower bounds
- 252, 253, 254, 255 // Upper bounds
- };
- std::vector<float> expectedOutputValues
- {
- 0.f, 1.f, 2.f, 3.f,
- 252.f, 253.f, 254.f, 255.f
- };
-
- QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
- ::tflite::TensorType_UINT8,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<int8_t> inputValues
- {
- -1, 0, 1, 2,
- -128, -127, 126, 127
- };
- std::vector<float> expectedOutputValues
- {
- -1.f, 0.f, 1.f, 2.f,
- -128.f, -127.f, 126.f, 127.f
- };
-
- QuantizationTest<int8_t , float>(tflite::BuiltinOperator_DEQUANTIZE,
- ::tflite::TensorType_INT8,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<int16_t> inputValues
- {
- -1, 0, 1, 2,
- -32768, -16384, 16384, 32767
- };
- std::vector<float> expectedOutputValues
- {
- -1.f, 0.f, 1.f, 2.f,
- -32768.f, -16384.f, 16384.f, 32767.f
- };
-
- QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
- ::tflite::TensorType_INT16,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-// Quantize operator test functions.
-void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- // Set input and output data
- std::vector<float> inputValues
- {
- -1.f, 0.f, 1.f, 2.f, // Lower bounds
- 252.f, 253.f, 255.f, 256.f // Upper bounds
- };
- std::vector<uint8_t> expectedOutputValues
- {
- 0, 0, 1, 2,
- 252, 253, 255, 255
- };
-
- QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_FLOAT32,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<float> inputValues
- {
- -1.f, 0.f, 1.f, 2.f,
- -128.5f, -127.f, 126.f, 127.5f
- };
- std::vector<int8_t> expectedOutputValues
- {
- -1, 0, 1, 2,
- -128, -127, 126, 127
- };
-
- QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_FLOAT32,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<float> inputValues
- {
- -1.f, 0.f, 1.f, 2.f,
- -32768.5f, -16384.f, 16384.f, 32767.5f
- };
- std::vector<int16_t> expectedOutputValues
- {
- -1, 0, 1, 2,
- -32768, -16384, 16384, 32767
- };
-
- QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_FLOAT32,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<int16_t> inputValues
- {
- -1, 0, 1, 2,
- -32768, -16384, 16384, 32767
- };
- std::vector<int16_t> expectedOutputValues
- {
- -1, 0, 1, 2,
- -32768, -16384, 16384, 32767
- };
-
- QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_INT16,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<int16_t> inputValues
- {
- -1, 0, 1, 2,
- -32768, -16384, 16384, 32767
- };
- std::vector<int8_t> expectedOutputValues
- {
- -1, 0, 1, 2,
- -128, -128, 127, 127
- };
-
- QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_INT16,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<int8_t> inputValues
- {
- -1, 0, 1, 2,
- -128, -127, 126, 127
- };
- std::vector<uint8_t> expectedOutputValues
- {
- 0, 0, 1, 2,
- 0, 0, 126, 127
- };
-
- QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_INT8,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 2, 4 };
- std::vector<int32_t> outputShape { 2, 4 };
-
- std::vector<uint8_t> inputValues
- {
- 0, 1, 2, 3,
- 126, 127, 254, 255
- };
- std::vector<int8_t> expectedOutputValues
- {
- 0, 1, 2, 3,
- 126, 127, 127, 127
- };
-
- QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
- ::tflite::TensorType_UINT8,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues);
-}
-
-TEST_SUITE("CpuRef_QuantizationTests")
-{
-
-TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeUint8Test(backends);
-}
-
-
-TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeInt8Test(backends);
-}
-
-
-TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DequantizeInt16Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Uint8Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Int8Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Int16Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int16Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int8Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt8Uint8Test(backends);
-}
-
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeUint8Int8Test(backends);
-}
-
-}
-
-TEST_SUITE("CpuAcc_QuantizationTests")
-{
-
-// Dequantize Operator Tests
-TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeUint8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeInt8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DequantizeInt16Test(backends);
-}
-
-// Quantize Operator Tests
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeFloat32Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeFloat32Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeInt8Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- QuantizeUint8Int8Test(backends);
-}
-
-}
-
-TEST_SUITE("GpuAcc_QuantizationTests")
-{
-
-// Dequantize Operator Tests
-TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeUint8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeInt8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DequantizeInt16Test(backends);
-}
-
-// Quantize Operator Tests
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeFloat32Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeFloat32Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeInt8Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- QuantizeUint8Int8Test(backends);
-}
-
-}
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/QuantizationTestHelper.hpp b/delegate/src/test/QuantizationTestHelper.hpp
deleted file mode 100644
index a8b102271a..0000000000
--- a/delegate/src/test/QuantizationTestHelper.hpp
+++ /dev/null
@@ -1,200 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantizationOperatorCode,
- tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }),
- QuantizationDetails_CustomQuantization);
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- inputTensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- outputTensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
- flatbuffers::Offset<void> operatorBuiltinOptions = 0;
- switch (quantizationOperatorCode)
- {
- case BuiltinOperator_QUANTIZE:
- {
- operatorBuiltinOptionsType = BuiltinOptions_QuantizeOptions;
- operatorBuiltinOptions = CreateQuantizeOptions(flatBufferBuilder).Union();
- break;
- }
- case BuiltinOperator_DEQUANTIZE:
- {
- operatorBuiltinOptionsType = BuiltinOptions_DequantizeOptions;
- operatorBuiltinOptions = CreateDequantizeOptions(flatBufferBuilder).Union();
- break;
- }
- default:
- break;
- }
-
- const std::vector<int32_t> operatorInputs{0};
- const std::vector<int32_t> operatorOutputs{1};
- flatbuffers::Offset <Operator> quantizationOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{0};
- const std::vector<int> subgraphOutputs{1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&quantizationOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Quantization Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, quantizationOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
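-// Builds the QUANTIZE/DEQUANTIZE model, runs it on a plain TfLite interpreter and on an
-// interpreter using the ArmNN delegate, and compares both outputs element-wise against
-// the expected values.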
-template <typename InputT, typename OutputT>
-void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
- tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<InputT>& inputValues,
- std::vector<OutputT>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateQuantizationTfLiteModel(quantizeOperatorCode,
- inputTensorType,
- outputTensorType,
- inputShape,
- outputShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<InputT>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<InputT>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
- }
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/RedefineTestHelper.hpp b/delegate/src/test/RedefineTestHelper.hpp
deleted file mode 100644
index 7f811d56dd..0000000000
--- a/delegate/src/test/RedefineTestHelper.hpp
+++ /dev/null
@@ -1,202 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateRedefineTfLiteModel(
- tflite::BuiltinOperator redefineOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const std::vector<int32_t>& targetShape,
- bool useOption = true,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- auto inputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- std::vector<flatbuffers::Offset<Tensor>> tensors;
- std::vector<int32_t> operatorInputs;
- std::vector<int> subgraphInputs;
- flatbuffers::Offset<void> operatorBuiltinOptions;
-
- if (useOption)
- {
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
- tensors = { inputTensor, outputTensor};
- operatorInputs = {0};
- subgraphInputs = {0};
- operatorBuiltinOptions = CreateReshapeOptions(
- flatBufferBuilder,
- flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
- }
- else
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
- sizeof(int32_t) * targetShape.size())));
- int32_t size = static_cast<int32_t>(targetShape.size());
- auto shapeTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>( { size } ),
- tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("shape"));
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- auto outputTensor = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- tensors = { inputTensor, outputTensor, shapeTensor };
- operatorInputs = {0, 2};
- subgraphInputs = {0, 2};
- operatorBuiltinOptions = CreateReshapeOptions(flatBufferBuilder).Union();
- }
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ReshapeOptions;
-
- const std::vector<int32_t> operatorOutputs{1};
- flatbuffers::Offset <Operator> redefineOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphOutputs{1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&redefineOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- redefineOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
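-// Builds the RESHAPE model (target shape given either via ReshapeOptions or as a shape
-// input tensor), runs it on a plain TfLite interpreter and on an interpreter using the
-// ArmNN delegate, and compares both outputs against the expected values.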
-template <typename T>
-void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
- tflite::TensorType tensorType,
- const std::vector<armnn::BackendId>& backends,
- const std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- std::vector<int32_t>& targetShape,
- bool useOption = true,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
- tensorType,
- inputShape,
- outputShape,
- targetShape,
- useOption,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- CHECK(tfLiteModel != nullptr);
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ReduceTest.cpp b/delegate/src/test/ReduceTest.cpp
deleted file mode 100644
index 9c11c8736c..0000000000
--- a/delegate/src/test/ReduceTest.cpp
+++ /dev/null
@@ -1,423 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ReduceTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void ReduceUint8KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<uint8_t>& expectedOutputValues)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0Values { 1, 2, 3,
- 4, 3, 1 }; // Inputs
- std::vector<int32_t> input1Values { 2 }; // Axis
-
- ReduceTest<uint8_t>(reduceOperatorCode,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- true);
-}
-
-void ReduceUint8Test(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<uint8_t>& expectedOutputValues)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 3 };
-
- std::vector<uint8_t> input0Values { 1, 2, 3,
- 4, 3, 1 }; // Inputs
- std::vector<int32_t> input1Values { 2 }; // Axis
-
- ReduceTest<uint8_t>(reduceOperatorCode,
- ::tflite::TensorType_UINT8,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- false);
-}
-
-void ReduceFp32KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& expectedOutputValues)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 1, 3 };
-
- std::vector<float> input0Values { 1001.0f, 11.0f, 1003.0f,
- 10.0f, 1002.0f, 12.0f }; // Inputs
- std::vector<int32_t> input1Values { 2 }; // Axis
-
- ReduceTest<float>(reduceOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- true);
-}
-
-void ReduceFp32Test(tflite::BuiltinOperator reduceOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& expectedOutputValues)
-{
- std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
- std::vector<int32_t> input1Shape { 1 };
- std::vector<int32_t> expectedOutputShape { 1, 1, 3 };
-
- std::vector<float> input0Values { 1001.0f, 11.0f, 1003.0f,
- 10.0f, 1002.0f, 12.0f }; // Inputs
- std::vector<int32_t> input1Values { 2 }; // Axis
-
- ReduceTest<float>(reduceOperatorCode,
- ::tflite::TensorType_FLOAT32,
- backends,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input0Values,
- input1Values,
- expectedOutputValues,
- false);
-}
-
-// REDUCE_MAX Tests
-TEST_SUITE("ReduceMax_CpuRefTests")
-{
-
-TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMax_CpuRefTests
-
-TEST_SUITE("ReduceMax_CpuAccTests")
-{
-
-TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMax_CpuAccTests
-
-TEST_SUITE("ReduceMax_GpuAccTests")
-{
-
-TEST_CASE ("ReduceMax_Uint8_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
- ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-
-TEST_CASE ("ReduceMax_Fp32_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("ReduceMax_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<float> expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMax_GpuAccTests
-
-// REDUCE_MIN Tests
-TEST_SUITE("ReduceMin_CpuRefTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMin_CpuRefTests
-
-TEST_SUITE("ReduceMin_CpuAccTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMin_CpuAccTests
-
-TEST_SUITE("ReduceMin_GpuAccTests")
-{
-
-TEST_CASE ("ReduceMin_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<float> expectedOutputValues { 10.0f, 11.0f, 12.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
- backends,
- expectedOutputValues);
-}
-
-} // End of ReduceMin_GpuAccTests
-
-// SUM Tests
-TEST_SUITE("Sum_CpuRefTests")
-{
-
-TEST_CASE ("Sum_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Sum_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
- ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-} // End of Sum_CpuRefTests
-
-TEST_SUITE("Sum_CpuAccTests")
-{
-
-TEST_CASE ("Sum_Uint8_KeepDims_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Sum_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
- ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-} // End of Sum_CpuAccTests
-
-TEST_SUITE("Sum_GpuAccTests")
-{
-
-TEST_CASE ("Sum_Uint8_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Sum_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<float> expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
- ReduceFp32Test(tflite::BuiltinOperator_SUM,
- backends,
- expectedOutputValues);
-}
-
-} // End of Sum_GpuAccTests
-
-// PROD Tests
-TEST_SUITE("Prod_CpuRefTests")
-{
-
-TEST_CASE ("Prod_Uint8_KeepDims_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Prod_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-} // End of Prod_CpuRefTests
-
-TEST_SUITE("Prod_CpuAccTests")
-{
-
-TEST_CASE ("Prod_Uint8_KeepDims_CpuAcc_Test" )
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Prod_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-} // End of Prod_CpuAccTests
-
-TEST_SUITE("Prod_GpuAccTests")
-{
-
-TEST_CASE ("Prod_Uint8_KeepDims_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
- ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-TEST_CASE ("Prod_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- std::vector<float> expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
- ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
- backends,
- expectedOutputValues);
-}
-
-} // End of Prod_GpuAccTests
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ReduceTestHelper.hpp b/delegate/src/test/ReduceTestHelper.hpp
deleted file mode 100644
index f500736080..0000000000
--- a/delegate/src/test/ReduceTestHelper.hpp
+++ /dev/null
@@ -1,228 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& input0TensorShape,
- std::vector<int32_t>& input1TensorShape,
- const std::vector <int32_t>& outputTensorShape,
- std::vector<int32_t>& axisData,
- const bool keepDims,
- float quantScale = 1.0f,
- int quantOffset = 0,
- bool kTfLiteNoQuantizationForQuantized = false)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- flatbuffers::Offset<tflite::Buffer> buffers[4] = {
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size())),
- CreateBuffer(flatBufferBuilder)
- };
-
- flatbuffers::Offset<tflite::QuantizationParameters> quantizationParametersAxis
- = CreateQuantizationParameters(flatBufferBuilder);
-
- flatbuffers::Offset<tflite::QuantizationParameters> quantizationParameters;
-
- if (kTfLiteNoQuantizationForQuantized)
- {
- if ((quantScale == 1 || quantScale == 0) && quantOffset == 0)
- {
- // Creates quantization parameter with quantization.type = kTfLiteNoQuantization
- quantizationParameters = CreateQuantizationParameters(flatBufferBuilder);
- }
- else
- {
- // Creates quantization parameter with quantization.type != kTfLiteNoQuantization
- quantizationParameters = CreateQuantizationParameters(
- flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
- }
- }
- else
- {
- quantizationParameters = CreateQuantizationParameters(
- flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
- }
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
- input1TensorShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("axis"),
- quantizationParametersAxis);
-
- // Create output tensor
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 3,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- // Create operator. Reduce operations MIN, MAX, SUM, MEAN, PROD uses ReducerOptions.
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();
-
- const std::vector<int> operatorInputs{ 0, 1 };
- const std::vector<int> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> reduceOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ 0, 1 };
- const std::vector<int> subgraphOutputs{ 2 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&reduceOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Reduce Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, reduceOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers, 4));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
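-// Builds the reduce model twice (once without and once with kTfLiteNoQuantization-style
-// parameters), runs the first on an interpreter using the ArmNN delegate and the second
-// on a plain TfLite interpreter, and compares both outputs against the expected values.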
-template <typename T>
-void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& input0Shape,
- std::vector<int32_t>& input1Shape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& input0Values,
- std::vector<int32_t>& input1Values,
- std::vector<T>& expectedOutputValues,
- const bool keepDims,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBufferArmNN = CreateReduceTfLiteModel(reduceOperatorCode,
- tensorType,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input1Values,
- keepDims,
- quantScale,
- quantOffset,
- false);
- std::vector<char> modelBufferTFLite = CreateReduceTfLiteModel(reduceOperatorCode,
- tensorType,
- input0Shape,
- input1Shape,
- expectedOutputShape,
- input1Values,
- keepDims,
- quantScale,
- quantOffset,
- true);
-
- const Model* tfLiteModelArmNN = GetModel(modelBufferArmNN.data());
- const Model* tfLiteModelTFLite = GetModel(modelBufferTFLite.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModelArmNN, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModelTFLite, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
- // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues);
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/ReshapeTest.cpp b/delegate/src/test/ReshapeTest.cpp
deleted file mode 100644
index 11449e29b8..0000000000
--- a/delegate/src/test/ReshapeTest.cpp
+++ /dev/null
@@ -1,517 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RedefineTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-#include <half/half.hpp>
-
-using Half = half_float::half;
-
-namespace armnnDelegate
-{
-
-void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 3, 2, 2 };
- std::vector<int32_t> targetShape { 1, 3, 2, 2 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption);
-}
-
-using namespace half_float::literal;
-
-void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 3, 2, 2 };
- std::vector<int32_t> targetShape { 1, 3, 2, 2 };
-
- std::vector<Half> inputValues = { 5._h, -8._h, -10._h, 7._h,
- 8._h, 12._h, -15._h, 2._h,
- 3._h, -4._h, -1._h, -11._h };
-
- std::vector<Half> expectedOutputValues = { 5._h, -8._h, -10._h, 7._h,
- 8._h, 12._h, -15._h, 2._h,
- 3._h, -4._h, -1._h, -11._h };
-
- RedefineTest<Half>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_FLOAT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption);
-}
-
-void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 1, 4, 3 };
- std::vector<int32_t> targetShape { 1, 4, 3 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption);
-}
-
-void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 6, 2 };
- std::vector<int32_t> targetShape { -1, 2 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption);
-}
-
-void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 12 };
- std::vector<int32_t> targetShape { -1 };
-
- std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
- 8.0f, 12.0f, -15.0f, 2.0f,
- 3.0f, -4.0f, -1.0f, -11.0f };
-
- RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption);
-}
-
-void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 6, 2 };
- std::vector<int32_t> targetShape { -1, 2 };
-
- std::vector<int8_t> inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int8_t> expectedOutputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- RedefineTest<int8_t>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_INT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption,
- 2.5f,
- 1);
-}
-
-void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 6, 2 };
- std::vector<int32_t> targetShape { -1, 2 };
-
- std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
- 8, 12, 15, 2,
- 3, 4, 1, 11 };
-
- std::vector<uint8_t> expectedOutputValues = { 5, 8, 10, 7,
- 8, 12, 15, 2,
- 3, 4, 1, 11 };
-
- RedefineTest<uint8_t>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption,
- 2.5f,
- 1);
-}
-
-void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 3, 4, 1 };
- std::vector<int32_t> outputShape { 6, 2 };
- std::vector<int32_t> targetShape { -1, 2 };
-
- std::vector<int16_t> inputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- std::vector<int16_t> expectedOutputValues = { -5, 8, -10, 7,
- 8, 12, -15, 2,
- 3, -4, -1, -11 };
-
- RedefineTest<int16_t>(tflite::BuiltinOperator_RESHAPE,
- ::tflite::TensorType_INT16,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- targetShape,
- useOption,
- 2.5f,
- 0);
-}
-
-TEST_SUITE("Reshape_GpuAccTests")
-{
-
-TEST_CASE ("Reshape_Simple_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleTest(backends);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeReduceDimTest(backends);
-}
-
-TEST_CASE ("Reshape_Flatten_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenTest(backends);
-}
-
-TEST_CASE ("Reshape_FlattenAll_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenAllTest(backends);
-}
-
-TEST_CASE ("Reshape_Int8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeInt8Test(backends);
-}
-
-TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeUint8Test(backends);
-}
-
-TEST_CASE ("Reshape_Float16_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleFloat16Test(backends);
-}
-
-TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleTest(backends, false);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeReduceDimTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Flatten_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenTest(backends, false);
-}
-
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeFlattenAllTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Int8_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeInt8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeUint8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Float16_ShapeTensor_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ReshapeSimpleFloat16Test(backends, false);
-}
-
-} // TEST_SUITE("Reshape_GpuAccTests")
-
-TEST_SUITE("Reshape_CpuAccTests")
-{
-
-TEST_CASE ("Reshape_Simple_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleTest(backends);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeReduceDimTest(backends);
-}
-
-TEST_CASE ("Reshape_Flatten_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenTest(backends);
-}
-
-TEST_CASE ("Reshape_FlattenAll_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenAllTest(backends);
-}
-
-TEST_CASE ("Reshape_Int8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeInt8Test(backends);
-}
-
-TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeUint8Test(backends);
-}
-
-TEST_CASE ("Reshape_Float16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleFloat16Test(backends);
-}
-
-TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleTest(backends, false);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeReduceDimTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenTest(backends, false);
-}
-
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeFlattenAllTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Int8_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeInt8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeUint8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Float16_ShapeTensor_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ReshapeSimpleFloat16Test(backends, false);
-}
-
-} // TEST_SUITE("Reshape_CpuAccTests")
-
-TEST_SUITE("Reshape_CpuRefTests")
-{
-
-TEST_CASE ("Reshape_Simple_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleTest(backends);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeReduceDimTest(backends);
-}
-
-TEST_CASE ("Reshape_Flatten_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenTest(backends);
-}
-
-TEST_CASE ("Reshape_FlattenAll_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenAllTest(backends);
-}
-
-TEST_CASE ("Reshape_Int8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt8Test(backends);
-}
-
-TEST_CASE ("Reshape_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeUint8Test(backends);
-}
-
-TEST_CASE ("Reshape_Int16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt16Test(backends);
-}
-
-TEST_CASE ("Reshape_Float16_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleFloat16Test(backends);
-}
-
-TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleTest(backends, false);
-}
-
-TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeReduceDimTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenTest(backends, false);
-}
-
-TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeFlattenAllTest(backends, false);
-}
-
-TEST_CASE ("Reshape_Int8_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeUint8Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeInt16Test(backends, false);
-}
-
-TEST_CASE ("Reshape_Float16_ShapeTensor_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ReshapeSimpleFloat16Test(backends, false);
-}
-
-} // TEST_SUITE("Reshape_CpuRefTests")
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ResizeTest.cpp b/delegate/src/test/ResizeTest.cpp
deleted file mode 100644
index 394ad6c7ae..0000000000
--- a/delegate/src/test/ResizeTest.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ResizeTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void ResizeBilinearFloat32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
- std::vector<float> input1Values
- {
- 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f
- };
- const std::vector<int32_t> input2NewShape { 5, 5 };
-
- // Calculate output data
- std::vector<float> expectedOutputValues
- {
- 0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
- 1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
- 3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
- 5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
- 6.0f, 6.6f, 7.2f, 7.8f, 8.0f
- };
-
- const std::vector<int32_t> input1Shape { 1, 3, 3, 1 };
- const std::vector<int32_t> input2Shape { 2 };
- const std::vector<int32_t> expectedOutputShape = input2NewShape;
-
- ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
- backends,
- input1Values,
- input1Shape,
- input2NewShape,
- input2Shape,
- expectedOutputValues,
- expectedOutputShape);
-}
-
-void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
-{
- // Set input data
-    std::vector<float> input1Values { 1.0f, 2.0f, 3.0f, 4.0f };
- const std::vector<int32_t> input2NewShape { 1, 1 };
-
- // Calculate output data
- std::vector<float> expectedOutputValues { 1.0f };
-
- const std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
- const std::vector<int32_t> input2Shape { 2 };
- const std::vector<int32_t> expectedOutputShape = input2NewShape;
-
- ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- backends,
- input1Values,
- input1Shape,
- input2NewShape,
- input2Shape,
- expectedOutputValues,
- expectedOutputShape);
-}
-
-TEST_SUITE("ResizeTests_GpuAccTests")
-{
-
-TEST_CASE ("Resize_Biliniar_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ResizeBiliniarFloat32Test(backends);
-}
-
-TEST_CASE ("Resize_NearestNeighbour_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- ResizeNearestNeighbourFloat32Test(backends);
-}
-
-} // TEST_SUITE("ResizeTests_GpuAccTests")
-
-
-TEST_SUITE("ResizeTests_CpuAccTests")
-{
-
-TEST_CASE ("Resize_Biliniar_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ResizeBiliniarFloat32Test(backends);
-}
-
-TEST_CASE ("Resize_NearestNeighbour_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- ResizeNearestNeighbourFloat32Test(backends);
-}
-
-} // TEST_SUITE("ResizeTests_CpuAccTests")
-
-
-TEST_SUITE("ResizeTests_CpuRefTests")
-{
-
-TEST_CASE ("Resize_Biliniar_Float32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ResizeBiliniarFloat32Test(backends);
-}
-
-TEST_CASE ("Resize_NearestNeighbour_Float32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ResizeNearestNeighbourFloat32Test(backends);
-}
-
-} // TEST_SUITE("ResizeTests_CpuRefTests")
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/ResizeTestHelper.hpp b/delegate/src/test/ResizeTestHelper.hpp
deleted file mode 100644
index 6937a4ba43..0000000000
--- a/delegate/src/test/ResizeTestHelper.hpp
+++ /dev/null
@@ -1,194 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
- tflite::TensorType inputTensorType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& sizeTensorData,
- const std::vector <int32_t>& sizeTensorShape,
- const std::vector <int32_t>& outputTensorShape)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
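-    // Buffer 0 is the empty sentinel buffer required by the TfLite schema; buffer 2 carries the
-    // constant size-tensor data, while buffers 1 and 3 back the variable input and output tensors.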
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
- sizeof(int32_t) * sizeTensorData.size())));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(), inputTensorShape.size()),
- inputTensorType,
- 1,
- flatBufferBuilder.CreateString("input_tensor"));
-
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
- sizeTensorShape.size()),
- TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("size_input_tensor"));
-
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- inputTensorType,
- 3,
- flatBufferBuilder.CreateString("output_tensor"));
-
- // Create Operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
- flatbuffers::Offset<void> operatorBuiltinOption = 0;
- switch (operatorCode)
- {
- case BuiltinOperator_RESIZE_BILINEAR:
- {
- operatorBuiltinOption = CreateResizeBilinearOptions(flatBufferBuilder, false, false).Union();
- operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeBilinearOptions;
- break;
- }
- case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
- {
- operatorBuiltinOption = CreateResizeNearestNeighborOptions(flatBufferBuilder, false, false).Union();
- operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeNearestNeighborOptions;
- break;
- }
- default:
- break;
- }
-
- const std::vector<int> operatorInputs{0, 1};
- const std::vector<int> operatorOutputs{2};
- flatbuffers::Offset <Operator> resizeOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOption);
-
- const std::vector<int> subgraphInputs{0, 1};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&resizeOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Resize Biliniar Operator Model");
- flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, operatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&opCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& input1Values,
- std::vector<int32_t> input1Shape,
- std::vector<int32_t> input2NewShape,
- std::vector<int32_t> input2Shape,
- std::vector<float>& expectedOutputValues,
- std::vector<int32_t> expectedOutputShape)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer = CreateResizeTfLiteModel(operatorCode,
- ::tflite::TensorType_FLOAT32,
- input1Shape,
- input2NewShape,
- input2Shape,
- expectedOutputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // The model will be executed using tflite and using the armnn delegate so that the outputs
- // can be compared.
-
- // Create TfLite Interpreter with armnn delegate
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create TfLite Interpreter without armnn delegate
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
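-    // Input 0 is the image tensor; input 1 is the requested output size (height, width) as int32.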
- // Set input data for the armnn interpreter
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
- armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
-
- // Set input data for the tflite interpreter
- armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
- armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
-
-    // Run EnqueueWorkload
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); i++)
- {
- CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
-        CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelegateOutputData[i]));
- }
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/RoundTest.cpp b/delegate/src/test/RoundTest.cpp
deleted file mode 100644
index 9d323f3700..0000000000
--- a/delegate/src/test/RoundTest.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RoundTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void FloorFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape {1, 3, 2, 3};
- std::vector<int32_t> outputShape {1, 3, 2, 3};
-
- std::vector<float> inputValues { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f };
-
- std::vector<float> expectedOutputValues { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
- 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f };
-
- RoundTest<float>(tflite::BuiltinOperator_FLOOR,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- inputValues,
- expectedOutputValues);
-}
-
-// FLOOR Test Suite
-TEST_SUITE("FLOOR_CpuRefTests")
-{
-
-TEST_CASE ("FLOOR_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- FloorFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("FLOOR_CpuAccTests")
-{
-
-TEST_CASE ("FLOOR_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- FloorFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("FLOOR_GpuAccTests")
-{
-
-TEST_CASE ("FLOOR_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- FloorFp32Test(backends);
-}
-
-}
-// End of FLOOR Test Suite
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/RoundTestHelper.hpp b/delegate/src/test/RoundTestHelper.hpp
deleted file mode 100644
index 6638607dcf..0000000000
--- a/delegate/src/test/RoundTestHelper.hpp
+++ /dev/null
@@ -1,163 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
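-    // Shared per-tensor quantization parameters; they are ignored for float tensors but let the same
-    // builder be reused for quantized data types via quantScale/quantOffset.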
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- const std::vector<int32_t> operatorInputs({0});
- const std::vector<int32_t> operatorOutputs({1});
-
- flatbuffers::Offset<Operator> roundOperator;
- flatbuffers::Offset<flatbuffers::String> modelDescription;
- flatbuffers::Offset<OperatorCode> operatorCode;
-
- switch (roundOperatorCode)
- {
- case tflite::BuiltinOperator_FLOOR:
- default:
- roundOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
- modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Floor Operator Model");
- operatorCode = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_FLOOR);
- break;
- }
- const std::vector<int32_t> subgraphInputs({0});
- const std::vector<int32_t> subgraphOutputs({1});
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&roundOperator, 1));
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T>
-void RoundTest(tflite::BuiltinOperator roundOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& shape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
- tensorType,
- shape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the armnnDelegate interpreter to use theArmnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- shape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/ShapeTest.cpp b/delegate/src/test/ShapeTest.cpp
deleted file mode 100644
index b49910adf6..0000000000
--- a/delegate/src/test/ShapeTest.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ShapeTestHelper.hpp"
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void ShapeSimpleTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape{ 1, 3, 2, 3 };
-
-    std::vector<int32_t> inputValues{ 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                      1, 1, 1, 1, 1, 1, 1, 1, 1 };
-
- std::vector<int32_t> expectedOutputShape{ 4 };
- std::vector<int32_t> expectedOutputValues{ 1, 3, 2, 3 };
-
- ShapeTest<int32_t, int32_t>(::tflite::TensorType_INT32,
- ::tflite::TensorType_INT32,
- backends,
- inputShape,
- inputValues,
- expectedOutputValues,
- expectedOutputShape);
-}
-
-// SHAPE Test Suite
-TEST_SUITE("SHAPE_CpuRefTests")
-{
-
-TEST_CASE("SHAPE_Simple_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- ShapeSimpleTest(backends);
-}
-
-}
-// End of SHAPE Test Suite
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/ShapeTestHelper.hpp b/delegate/src/test/ShapeTestHelper.hpp
deleted file mode 100644
index 9b3d574e23..0000000000
--- a/delegate/src/test/ShapeTestHelper.hpp
+++ /dev/null
@@ -1,173 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- inputTensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- outputTensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- const std::vector<int32_t> operatorInputs({ 0 });
- const std::vector<int32_t> operatorOutputs({ 1 });
-
- flatbuffers::Offset<Operator> shapeOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
- operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- BuiltinOptions_ShapeOptions,
- CreateShapeOptions(flatBufferBuilder, outputTensorType).Union());
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: SHAPE Operator Model");
-
- flatbuffers::Offset<OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_SHAPE);
-
- const std::vector<int32_t> subgraphInputs({ 0 });
- const std::vector<int32_t> subgraphOutputs({ 1 });
-
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
- subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
- subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&shapeOperator, 1));
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
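-// T is the input element type and K the output element type, so the shape output stays int32 even
-// when the input data is quantized.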
-template<typename T, typename K>
-void ShapeTest(tflite::TensorType inputTensorType,
- tflite::TensorType outputTensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<T>& inputValues,
- std::vector<K>& expectedOutputValues,
- std::vector<int32_t>& expectedOutputShape,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateShapeTfLiteModel(inputTensorType,
- outputTensorType,
- inputShape,
- expectedOutputShape,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
-
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
-
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
-
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the armnnDelegate interpreter to use theArmnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
- armnnDelegate,
- expectedOutputShape,
- expectedOutputValues,
- 0);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/SliceTest.cpp b/delegate/src/test/SliceTest.cpp
deleted file mode 100644
index 1d7133f1fd..0000000000
--- a/delegate/src/test/SliceTest.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "SliceTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3 };
- std::vector<int32_t> outputShape { 2, 1, 3 };
- std::vector<int32_t> beginShape { 3 };
- std::vector<int32_t> sizeShape { 3 };
-
- std::vector<int32_t> beginData { 1, 0, 0 };
- std::vector<int32_t> sizeData { 2, 1, 3 };
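-    // begin {1, 0, 0} with size {2, 1, 3} keeps dim-0 indices 1..2 and only the first dim-1 row,
-    // i.e. the {3, 3, 3} and {5, 5, 5} slices of the input.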
- std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
- std::vector<float> outputData { 3.0f, 3.0f, 3.0f,
- 5.0f, 5.0f, 5.0f };
-
- SliceTestImpl<float>(
- backends,
- inputData,
- outputData,
- beginData,
- sizeData,
- inputShape,
- beginShape,
- sizeShape,
- outputShape);
-}
-
-TEST_SUITE("Slice_CpuRefTests")
-{
-
-TEST_CASE ("Slice_Simple_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SliceFixtureSimpleTest(backends);
-}
-
-} // Slice_CpuRefTests TestSuite
-
-
-
-TEST_SUITE("Slice_CpuAccTests")
-{
-
-TEST_CASE ("Slice_Simple_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SliceFixtureSimpleTest(backends);
-}
-
-} // Slice_CpuAccTests TestSuite
-
-
-
-TEST_SUITE("StridedSlice_GpuAccTests")
-{
-
-TEST_CASE ("Slice_Simple_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SliceFixtureSimpleTest(backends);
-}
-
-} // Slice_GpuAccTests TestSuite
-
-} // namespace armnnDelegate \ No newline at end of file
diff --git a/delegate/src/test/SliceTestHelper.hpp b/delegate/src/test/SliceTestHelper.hpp
deleted file mode 100644
index 94c076b4f7..0000000000
--- a/delegate/src/test/SliceTestHelper.hpp
+++ /dev/null
@@ -1,183 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& beginTensorData,
- const std::vector<int32_t>& sizeTensorData,
- const std::vector<int32_t>& beginTensorShape,
- const std::vector<int32_t>& sizeTensorShape,
- const std::vector<int32_t>& outputTensorShape)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
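-    // Five buffers: index 0 is the mandatory empty buffer, indices 2 and 3 hold the constant
-    // begin/size data, and indices 1 and 4 back the variable input and output tensors.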
- flatbuffers::Offset<tflite::Buffer> buffers[5] = {
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
- sizeof(int32_t) * beginTensorData.size())),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
- sizeof(int32_t) * sizeTensorData.size())),
- CreateBuffer(flatBufferBuilder)
- };
-
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"));
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
- beginTensorShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("begin_tensor"));
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
- sizeTensorShape.size()),
- ::tflite::TensorType_INT32,
- 3,
- flatBufferBuilder.CreateString("size_tensor"));
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 4,
- flatBufferBuilder.CreateString("output"));
-
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SliceOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateSliceOptions(flatBufferBuilder).Union();
-
- const std::vector<int> operatorInputs{ 0, 1, 2 };
- const std::vector<int> operatorOutputs{ 3 };
- flatbuffers::Offset <Operator> sliceOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ 0, 1, 2 };
- const std::vector<int> subgraphOutputs{ 3 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&sliceOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Slice Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- BuiltinOperator_SLICE);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers, 5));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void SliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- std::vector<int32_t>& beginTensorData,
- std::vector<int32_t>& sizeTensorData,
- std::vector<int32_t>& inputTensorShape,
- std::vector<int32_t>& beginTensorShape,
- std::vector<int32_t>& sizeTensorShape,
- std::vector<int32_t>& outputTensorShape)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateSliceTfLiteModel(
- ::tflite::TensorType_FLOAT32,
- inputTensorShape,
- beginTensorData,
- sizeTensorData,
- beginTensorShape,
- sizeTensorShape,
- outputTensorShape);
-
- auto tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the armnnDelegate interpreter to use theArmnnDelegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShape,
- expectedOutputValues);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-} // End of Slice Test
-
-} // anonymous namespace \ No newline at end of file
diff --git a/delegate/src/test/SoftmaxTest.cpp b/delegate/src/test/SoftmaxTest.cpp
deleted file mode 100644
index 3339c09918..0000000000
--- a/delegate/src/test/SoftmaxTest.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "SoftmaxTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-TEST_SUITE ("Softmax_GpuAccTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
- 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
-}
-
-TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092, 0.352414012,
- 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-
-}
-
-TEST_CASE ("Log_Softmax_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- std::vector<float> expectedOutput =
- {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
- -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_GpuAccTests")
-
-TEST_SUITE ("Softmax_CpuRefTests")
-{
-
-TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput = {
- 0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
- 0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
-}
-
-TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput = {
- 0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
- 0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
- SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
-}
-
-TEST_CASE ("Log_Softmax_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- std::vector<float> expectedOutput =
- {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
- -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
- SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
-}
-} // TEST_SUITE ("Softmax_CpuRefTests")
-} // namespace armnnDelegate
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
deleted file mode 100644
index f3367f9d24..0000000000
--- a/delegate/src/test/SoftmaxTestHelper.hpp
+++ /dev/null
@@ -1,194 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-#include <armnnUtils/FloatingPointComparison.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape,
- float beta)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 1);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
- tensorShape.size()),
- tensorType,
- 2);
-
- const std::vector<int32_t> operatorInputs({0});
- const std::vector<int32_t> operatorOutputs({1});
-
- flatbuffers::Offset<Operator> softmaxOperator;
- flatbuffers::Offset<flatbuffers::String> modelDescription;
- flatbuffers::Offset<OperatorCode> operatorCode;
-
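-    // beta is only serialised for BuiltinOperator_SOFTMAX; the LogSoftmaxOptions table has no beta field.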
- switch (softmaxOperatorCode)
- {
- case tflite::BuiltinOperator_SOFTMAX:
- softmaxOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_SoftmaxOptions,
- CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
- modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
- operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_SOFTMAX);
- break;
- case tflite::BuiltinOperator_LOG_SOFTMAX:
- softmaxOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_LogSoftmaxOptions,
- CreateLogSoftmaxOptions(flatBufferBuilder).Union());
- flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
- operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_LOG_SOFTMAX);
- break;
- default:
- break;
- }
- const std::vector<int32_t> subgraphInputs({0});
- const std::vector<int32_t> subgraphOutputs({1});
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&softmaxOperator, 1));
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& shape,
- std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues,
- float beta = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
- tensorType,
- shape,
- beta);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteInterpreterInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- for (size_t i = 0; i < inputValues.size(); ++i)
- {
- CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1));
- CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
- armnnDelegateOutputData[i], 0.1));
- }
-}
-
-
-/// Convenience function to run softmax and log-softmax test cases
-/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
-/// \param backends armnn backends to target
-/// \param beta multiplicative parameter to the softmax function
-/// \param expectedOutput to be checked against transformed input
-void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
-                     std::vector<armnn::BackendId> backends,
-                     float beta,
-                     std::vector<float> expectedOutput)
-{
- std::vector<float> input = {
- 1.0, 2.5, 3.0, 4.5, 5.0,
- -1.0, -2.5, -3.0, -4.5, -5.0};
- std::vector<int32_t> shape = {2, 5};
-
- SoftmaxTest(operatorCode,
- tflite::TensorType_FLOAT32,
- backends,
- shape,
- input,
- expectedOutput,
- beta);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/SpaceDepthTest.cpp b/delegate/src/test/SpaceDepthTest.cpp
deleted file mode 100644
index f80e749b87..0000000000
--- a/delegate/src/test/SpaceDepthTest.cpp
+++ /dev/null
@@ -1,207 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "SpaceDepthTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void DepthToSpaceFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 4 };
- std::vector<int32_t> outputShape { 1, 4, 4, 1 };
-
- std::vector<float> inputValues = { 1.f, 2.f, 3.f, 4.f,
- 5.f, 6.f, 7.f, 8.f,
- 9.f, 10.f, 11.f, 12.f,
- 13.f, 14.f, 15.f, 16.f };
-
- std::vector<float> expectedOutputValues = { 1.f, 2.f, 5.f, 6.f,
- 3.f, 4.f, 7.f, 8.f,
- 9.f, 10.f, 13.f, 14.f,
- 11.f, 12.f, 15.f, 16.f };
-
- SpaceDepthTest<float>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- blockSize);
-}
-
-void DepthToSpaceUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
-{
- // Set input data
- std::vector<int32_t> inputShape { 2, 1, 1, 4 };
- std::vector<int32_t> outputShape { 2, 2, 2, 1 };
-
- std::vector<uint8_t> inputValues = { 1, 2, 3, 4,
- 5, 6, 7, 8 };
-
- std::vector<uint8_t> expectedOutputValues = { 1, 2, 3, 4,
- 5, 6, 7, 8 };
-
- SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- blockSize);
-}
-
-void SpaceToDepthFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 2 };
- std::vector<int32_t> outputShape { 1, 1, 1, 8 };
-
- std::vector<float> inputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
- std::vector<float> expectedOutputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
-
- SpaceDepthTest<float>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
- ::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- blockSize);
-}
-
-void SpaceToDepthUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
-{
- // Set input data
- std::vector<int32_t> inputShape { 1, 2, 2, 1 };
- std::vector<int32_t> outputShape { 1, 1, 1, 4 };
-
- std::vector<uint8_t> inputValues = { 1, 2, 3, 2 };
- std::vector<uint8_t> expectedOutputValues = { 1, 2, 3, 2 };
-
- SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
- ::tflite::TensorType_UINT8,
- backends,
- inputShape,
- outputShape,
- inputValues,
- expectedOutputValues,
- blockSize);
-}
-
-TEST_SUITE("DepthToSpace_CpuRefTests")
-{
-
-TEST_CASE ("DepthToSpaceFp32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- DepthToSpaceUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("DepthToSpace_CpuRefTests")
-
-
-TEST_SUITE("DepthToSpace_CpuAccTests")
-{
-
-TEST_CASE ("DepthToSpaceFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- DepthToSpaceUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("DepthToSpace_CpuAccTests")
-
-TEST_SUITE("DepthToSpace_GpuAccTests")
-{
-
-TEST_CASE ("DepthToSpaceFp32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DepthToSpaceFp32Test(backends, 2);
-}
-
-TEST_CASE ("DepthToSpaceUint8Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- DepthToSpaceUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("DepthToSpace_GpuAccTests")
-
-TEST_SUITE("SpaceToDepth_CpuRefTests")
-{
-
-TEST_CASE ("SpaceToDepthFp32Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SpaceToDepthFp32Test(backends, 2);
-}
-
-TEST_CASE ("SpaceToDepthUint8Test_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- SpaceToDepthUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("SpaceToDepth_CpuRefTests")
-
-TEST_SUITE("SpaceToDepth_CpuAccTests")
-{
-
-TEST_CASE ("SpaceToDepthFp32Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SpaceToDepthFp32Test(backends, 2);
-}
-
-TEST_CASE ("SpaceToDepthUint8Test_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- SpaceToDepthUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("SpaceToDepth_CpuAccTests")
-
-TEST_SUITE("SpaceToDepth_GpuAccTests")
-{
-
-TEST_CASE ("SpaceToDepthFp32Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SpaceToDepthFp32Test(backends, 2);
-}
-
-TEST_CASE ("SpaceToDepthUint8Test_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- SpaceToDepthUint8Test(backends, 2);
-}
-
-} // TEST_SUITE("SpaceToDepth_GpuAccTests")
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/SpaceDepthTestHelper.hpp b/delegate/src/test/SpaceDepthTestHelper.hpp
deleted file mode 100644
index 737e199ef7..0000000000
--- a/delegate/src/test/SpaceDepthTestHelper.hpp
+++ /dev/null
@@ -1,168 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepthOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- int32_t blockSize)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ 1.0f }),
- flatBufferBuilder.CreateVector<int64_t>({ 0 }));
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
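-    // All three buffers are empty: buffer 0 is the TfLite sentinel, while buffers 1 and 2 back the
-    // input and output tensors, whose data is supplied at runtime.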
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 2,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
-
- const std::vector<int32_t> operatorInputs({0});
- const std::vector<int32_t> operatorOutputs({1});
-
- flatbuffers::Offset<Operator> spaceDepthOperator;
- flatbuffers::Offset<flatbuffers::String> modelDescription;
- flatbuffers::Offset<OperatorCode> operatorCode;
-
- switch (spaceDepthOperatorCode)
- {
- case tflite::BuiltinOperator_SPACE_TO_DEPTH:
- spaceDepthOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_SpaceToDepthOptions,
- CreateSpaceToDepthOptions(flatBufferBuilder, blockSize).Union());
- modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: SPACE_TO_DEPTH Operator Model");
- operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_SPACE_TO_DEPTH);
- break;
- case tflite::BuiltinOperator_DEPTH_TO_SPACE:
- spaceDepthOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_DepthToSpaceOptions,
- CreateDepthToSpaceOptions(flatBufferBuilder, blockSize).Union());
-            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: DEPTH_TO_SPACE Operator Model");
- operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_DEPTH_TO_SPACE);
- break;
- default:
- break;
- }
- const std::vector<int32_t> subgraphInputs({0});
- const std::vector<int32_t> subgraphOutputs({1});
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&spaceDepthOperator, 1));
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& outputShape,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- int32_t blockSize = 2)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
- tensorType,
- inputShape,
- outputShape,
- blockSize);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use the ArmNN delegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
-}
-
-} // anonymous namespace
diff --git a/delegate/src/test/SplitTest.cpp b/delegate/src/test/SplitTest.cpp
deleted file mode 100644
index 5940516583..0000000000
--- a/delegate/src/test/SplitTest.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "SplitTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-// SPLIT Operator
-void SplitUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> axisShape { 1 };
- std::vector<int32_t> inputShape { 2, 2, 2, 2} ;
- std::vector<int32_t> outputShape0 { 2, 2, 2, 1 };
- std::vector<int32_t> outputShape1 { 2, 2, 2, 1 };
- std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
-
- std::vector<int32_t> axisData { 3 }; // Axis
- std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16 }; // Input
-
-
- std::vector<uint8_t> expectedOutputValues0 { 1, 3, 5, 7, 9, 11, 13, 15 };
- std::vector<uint8_t> expectedOutputValues1 { 2, 4, 6, 8, 10, 12, 14, 16 };
- std::vector<std::vector<uint8_t>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
-
- int32_t numSplits = 2;
-
- SplitTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
- axisShape,
- inputShape,
- outputShapes,
- axisData,
- inputValues,
- expectedOutputValues,
- numSplits);
-}
-
-void SplitFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> axisShape { 1 };
- std::vector<int32_t> inputShape { 2, 2, 2, 2 };
- std::vector<int32_t> outputShape0 { 2, 1, 2, 2 };
- std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
- std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
-
- std::vector<int32_t> axisData { 1 }; // Axis
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f }; // Input
-
-
- std::vector<float> expectedOutputValues0 { 1.0f, 2.0f, 3.0f, 4.0f, 9.0f, 10.0f, 11.0f, 12.0f };
- std::vector<float> expectedOutputValues1 { 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, 14.0f, 15.0f, 16.0f };
- std::vector<std::vector<float>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
-
- int32_t numSplits = 2;
-
- SplitTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- axisShape,
- inputShape,
- outputShapes,
- axisData,
- inputValues,
- expectedOutputValues,
- numSplits);
-}
-
-// SPLIT Test Suite
-TEST_SUITE("SPLIT_CpuRefTests")
-{
-
-TEST_CASE ("SPLIT_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_CpuAccTests")
-{
-
-TEST_CASE ("SPLIT_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_GpuAccTests")
-{
-
-TEST_CASE ("SPLIT_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitFp32Test(backends);
-}
-
-}
-// End of SPLIT Test Suite
-
-// SPLIT_V Operator
-void SplitVUint8Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> axisShape { 1 };
- std::vector<int32_t> inputShape { 2, 4, 2, 2 };
- std::vector<int32_t> splitsShape { 2 };
- std::vector<int32_t> outputShape0 { 2, 3, 2, 2 };
- std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
- std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
-
- std::vector<int32_t> axisData { 1 }; // Axis
- std::vector<int32_t> splitsData { 3, 1 }; // Splits
- std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32 }; // Input
-
-
- std::vector<uint8_t> expectedOutputValues0 { 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28 };
- std::vector<uint8_t> expectedOutputValues1 { 13, 14, 15, 16, 29, 30, 31, 32 };
- std::vector<std::vector<uint8_t>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
-
- int32_t numSplits = 2;
-
- SplitVTest<uint8_t>(::tflite::TensorType_UINT8,
- backends,
- inputShape,
- splitsShape,
- axisShape,
- outputShapes,
- inputValues,
- splitsData,
- axisData,
- expectedOutputValues,
- numSplits);
-}
-
-void SplitVFp32Test(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> axisShape { 1 };
- std::vector<int32_t> inputShape { 2, 4, 2, 2 };
- std::vector<int32_t> splitsShape { 2 };
- std::vector<int32_t> outputShape0 { 2, 3, 2, 2 };
- std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
- std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
-
- std::vector<int32_t> axisData { 1 }; // Axis
- std::vector<int32_t> splitsData { 3, 1 }; // Splits
- std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f,
- 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f,
- 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f }; // Input
-
-
- std::vector<float> expectedOutputValues0 { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f, 12.0f, 17.0f, 18.0f, 19.0f, 20.0f,
- 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f };
- std::vector<float> expectedOutputValues1 { 13.0f, 14.0f, 15.0f, 16.0f, 29.0f, 30.0f, 31.0f, 32.0f };
- std::vector<std::vector<float>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
-
- int32_t numSplits = 2;
-
- SplitVTest<float>(::tflite::TensorType_FLOAT32,
- backends,
- inputShape,
- splitsShape,
- axisShape,
- outputShapes,
- inputValues,
- splitsData,
- axisData,
- expectedOutputValues,
- numSplits);
-}
-
-// SPLIT_V Test Suite
-TEST_SUITE("SPLIT_V_CpuRefTests")
-{
-
-TEST_CASE ("SPLIT_V_Uint8_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitVUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_V_Fp32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- SplitVFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_V_CpuAccTests")
-{
-
-TEST_CASE ("SPLIT_V_Uint8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitVUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_V_Fp32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- SplitVFp32Test(backends);
-}
-
-}
-
-TEST_SUITE("SPLIT_V_GpuAccTests")
-{
-
-TEST_CASE ("SPLIT_V_Uint8_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitVUint8Test(backends);
-}
-
-TEST_CASE ("SPLIT_V_Fp32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- SplitVFp32Test(backends);
-}
-
-}
-// End of SPLIT_V Test Suite
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/SplitTestHelper.hpp b/delegate/src/test/SplitTestHelper.hpp
deleted file mode 100644
index 3c5f50ffac..0000000000
--- a/delegate/src/test/SplitTestHelper.hpp
+++ /dev/null
@@ -1,370 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
- std::vector<int32_t>& axisTensorShape,
- std::vector<int32_t>& inputTensorShape,
- const std::vector<std::vector<int32_t>>& outputTensorShapes,
- std::vector<int32_t>& axisData,
- const int32_t numSplits,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size())));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
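-    // Tensor 0 is the axis, a constant backed by buffer 2; tensor 1 is the runtime input using the empty buffer 1.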
- std::array<flatbuffers::Offset<Tensor>, 4> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
- axisTensorShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("axis"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
-    // Create the output tensors
- for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
- {
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors[i + 2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
- outputTensorShapes[i].size()),
- tensorType,
- (i+3),
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
- }
-
-    // Create the SPLIT operator; SplitOptions carries the number of splits.
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();
-
- const std::vector<int> operatorInputs{ {0, 1} };
- const std::vector<int> operatorOutputs{ {2, 3} };
- flatbuffers::Offset <Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1} };
- const std::vector<int> subgraphOutputs{ {2, 3} };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void SplitTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& axisTensorShape,
- std::vector<int32_t>& inputTensorShape,
- std::vector<std::vector<int32_t>>& outputTensorShapes,
- std::vector<int32_t>& axisData,
- std::vector<T>& inputValues,
- std::vector<std::vector<T>>& expectedOutputValues,
- const int32_t numSplits,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
- axisTensorShape,
- inputTensorShape,
- outputTensorShapes,
- axisData,
- numSplits,
- quantScale,
- quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the ArmNN interpreter to use the ArmNN delegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
- {
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShapes[i],
- expectedOutputValues[i],
- i);
- }
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-} // End of SPLIT Test
-
-std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
- std::vector<int32_t>& inputTensorShape,
- std::vector<int32_t>& splitsTensorShape,
- std::vector<int32_t>& axisTensorShape,
- const std::vector<std::vector<int32_t>>& outputTensorShapes,
- std::vector<int32_t>& splitsData,
- std::vector<int32_t>& axisData,
- const int32_t numSplits,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
- buffers[1] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
- sizeof(int32_t) * splitsData.size()));
- buffers[2] = CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
- sizeof(int32_t) * axisData.size()));
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
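-    // Tensors 1 (splits) and 2 (axis) are constants backed by buffers 1 and 2; the input and output
-    // tensors reference the empty sentinel buffer 0.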
- std::array<flatbuffers::Offset<Tensor>, 5> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
- splitsTensorShape.size()),
- ::tflite::TensorType_INT32,
- 1,
- flatBufferBuilder.CreateString("splits"),
- quantizationParameters);
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
- axisTensorShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("axis"),
- quantizationParameters);
-
-    // Create the output tensors
- for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
- {
- tensors[i + 3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
- outputTensorShapes[i].size()),
- tensorType,
- 0,
- flatBufferBuilder.CreateString("output"),
- quantizationParameters);
- }
-
-    // Create the SPLIT_V operator; SplitVOptions carries the number of splits.
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();
-
- const std::vector<int> operatorInputs{ {0, 1, 2} };
- const std::vector<int> operatorOutputs{ {3, 4} };
- flatbuffers::Offset <Operator> controlOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ {0, 1, 2} };
- const std::vector<int> subgraphOutputs{ {3, 4} };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&controlOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void SplitVTest(tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputTensorShape,
- std::vector<int32_t>& splitsTensorShape,
- std::vector<int32_t>& axisTensorShape,
- std::vector<std::vector<int32_t>>& outputTensorShapes,
- std::vector<T>& inputValues,
- std::vector<int32_t>& splitsData,
- std::vector<int32_t>& axisData,
- std::vector<std::vector<T>>& expectedOutputValues,
- const int32_t numSplits,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
- inputTensorShape,
- splitsTensorShape,
- axisTensorShape,
- outputTensorShapes,
- splitsData,
- axisData,
- numSplits,
- quantScale,
- quantOffset);
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the ArmNN interpreter to use the ArmNN delegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
- {
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShapes[i],
- expectedOutputValues[i],
- i);
- }
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-} // End of SPLIT_V Test
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/StridedSliceTest.cpp b/delegate/src/test/StridedSliceTest.cpp
deleted file mode 100644
index 43aea8a449..0000000000
--- a/delegate/src/test/StridedSliceTest.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "StridedSliceTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void StridedSlice4DTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
- std::vector<int32_t> beginShape { 4 };
- std::vector<int32_t> endShape { 4 };
- std::vector<int32_t> strideShape { 4 };
-
- std::vector<int32_t> beginData { 1, 0, 0, 0 };
- std::vector<int32_t> endData { 2, 2, 3, 1 };
- std::vector<int32_t> strideData { 1, 1, 1, 1 };
- std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
- std::vector<float> outputData { 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f };
-
- StridedSliceTestImpl<float>(
- backends,
- inputData,
- outputData,
- beginData,
- endData,
- strideData,
- inputShape,
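-    // With a block size of 2, the single 2x2 spatial block is folded straight into depth,
-    // so the output equals the input.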
- beginShape,
- endShape,
- strideShape,
- outputShape
- );
-}
-
-void StridedSlice4DReverseTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3, 1 };
- std::vector<int32_t> outputShape { 1, 2, 3, 1 };
- std::vector<int32_t> beginShape { 4 };
- std::vector<int32_t> endShape { 4 };
- std::vector<int32_t> strideShape { 4 };
-
- std::vector<int32_t> beginData { 1, -1, 0, 0 };
- std::vector<int32_t> endData { 2, -3, 3, 1 };
- std::vector<int32_t> strideData { 1, -1, 1, 1 };
- std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
- std::vector<float> outputData { 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f };
-
- StridedSliceTestImpl<float>(
- backends,
- inputData,
- outputData,
- beginData,
- endData,
- strideData,
- inputShape,
- beginShape,
- endShape,
- strideShape,
- outputShape
- );
-}
-
-void StridedSliceSimpleStrideTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3, 1 };
- std::vector<int32_t> outputShape { 2, 1, 2, 1 };
- std::vector<int32_t> beginShape { 4 };
- std::vector<int32_t> endShape { 4 };
- std::vector<int32_t> strideShape { 4 };
-
- std::vector<int32_t> beginData { 0, 0, 0, 0 };
- std::vector<int32_t> endData { 3, 2, 3, 1 };
- std::vector<int32_t> strideData { 2, 2, 2, 1 };
- std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
- std::vector<float> outputData { 1.0f, 1.0f,
- 5.0f, 5.0f };
-
- StridedSliceTestImpl<float>(
- backends,
- inputData,
- outputData,
- beginData,
- endData,
- strideData,
- inputShape,
- beginShape,
- endShape,
- strideShape,
- outputShape
- );
-}
-
-void StridedSliceSimpleRangeMaskTest(std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 3, 2, 3, 1 };
- std::vector<int32_t> outputShape { 3, 2, 3, 1 };
- std::vector<int32_t> beginShape { 4 };
- std::vector<int32_t> endShape { 4 };
- std::vector<int32_t> strideShape { 4 };
-
- std::vector<int32_t> beginData { 1, 1, 1, 1 };
- std::vector<int32_t> endData { 1, 1, 1, 1 };
- std::vector<int32_t> strideData { 1, 1, 1, 1 };
-
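-    // A beginMask/endMask of -1 sets every mask bit, so the begin and end values above are ignored
-    // and the full tensor is copied.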
- int beginMask = -1;
- int endMask = -1;
-
- std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
- std::vector<float> outputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
- 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
- 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
-
- StridedSliceTestImpl<float>(
- backends,
- inputData,
- outputData,
- beginData,
- endData,
- strideData,
- inputShape,
- beginShape,
- endShape,
- strideShape,
- outputShape,
- beginMask,
- endMask
- );
-}
-
-TEST_SUITE("StridedSlice_CpuRefTests")
-{
-
-TEST_CASE ("StridedSlice_4D_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSlice4DTest(backends);
-}
-
-TEST_CASE ("StridedSlice_4D_Reverse_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSlice4DReverseTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleStride_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSliceSimpleStrideTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleRange_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- StridedSliceSimpleRangeMaskTest(backends);
-}
-
-} // StridedSlice_CpuRefTests TestSuite
-
-
-
-TEST_SUITE("StridedSlice_CpuAccTests")
-{
-
-TEST_CASE ("StridedSlice_4D_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSlice4DTest(backends);
-}
-
-TEST_CASE ("StridedSlice_4D_Reverse_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSlice4DReverseTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleStride_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSliceSimpleStrideTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleRange_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- StridedSliceSimpleRangeMaskTest(backends);
-}
-
-} // StridedSlice_CpuAccTests TestSuite
-
-
-
-TEST_SUITE("StridedSlice_GpuAccTests")
-{
-
-TEST_CASE ("StridedSlice_4D_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSlice4DTest(backends);
-}
-
-TEST_CASE ("StridedSlice_4D_Reverse_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSlice4DReverseTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleStride_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSliceSimpleStrideTest(backends);
-}
-
-TEST_CASE ("StridedSlice_SimpleRange_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- StridedSliceSimpleRangeMaskTest(backends);
-}
-
-} // StridedSlice_GpuAccTests TestSuite
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/StridedSliceTestHelper.hpp b/delegate/src/test/StridedSliceTestHelper.hpp
deleted file mode 100644
index ef944d7e7a..0000000000
--- a/delegate/src/test/StridedSliceTestHelper.hpp
+++ /dev/null
@@ -1,221 +0,0 @@
-//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
- const std::vector<int32_t>& inputTensorShape,
- const std::vector<int32_t>& beginTensorData,
- const std::vector<int32_t>& endTensorData,
- const std::vector<int32_t>& strideTensorData,
- const std::vector<int32_t>& beginTensorShape,
- const std::vector<int32_t>& endTensorShape,
- const std::vector<int32_t>& strideTensorShape,
- const std::vector<int32_t>& outputTensorShape,
- const int32_t beginMask,
- const int32_t endMask,
- const int32_t ellipsisMask,
- const int32_t newAxisMask,
- const int32_t ShrinkAxisMask,
- const armnn::DataLayout& dataLayout)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- flatbuffers::Offset<tflite::Buffer> buffers[6] = {
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
- sizeof(int32_t) * beginTensorData.size())),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
- sizeof(int32_t) * endTensorData.size())),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
- sizeof(int32_t) * strideTensorData.size())),
- CreateBuffer(flatBufferBuilder)
- };
-
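-    // Buffers 2-4 hold the constant begin, end and stride data referenced by tensors 1-3 below;
-    // the input and output tensors use empty buffers.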
- std::array<flatbuffers::Offset<Tensor>, 5> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"));
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
- beginTensorShape.size()),
- ::tflite::TensorType_INT32,
- 2,
- flatBufferBuilder.CreateString("begin_tensor"));
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(endTensorShape.data(),
- endTensorShape.size()),
- ::tflite::TensorType_INT32,
- 3,
- flatBufferBuilder.CreateString("end_tensor"));
- tensors[3] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(strideTensorShape.data(),
- strideTensorShape.size()),
- ::tflite::TensorType_INT32,
- 4,
- flatBufferBuilder.CreateString("stride_tensor"));
- tensors[4] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- 5,
- flatBufferBuilder.CreateString("output"));
-
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_StridedSliceOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions = CreateStridedSliceOptions(flatBufferBuilder,
- beginMask,
- endMask,
- ellipsisMask,
- newAxisMask,
- ShrinkAxisMask).Union();
-
- const std::vector<int> operatorInputs{ 0, 1, 2, 3 };
- const std::vector<int> operatorOutputs{ 4 };
- flatbuffers::Offset <Operator> sliceOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- const std::vector<int> subgraphInputs{ 0, 1, 2, 3 };
- const std::vector<int> subgraphOutputs{ 4 };
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&sliceOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: StridedSlice Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- BuiltinOperator_STRIDED_SLICE);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers, 6));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
- std::vector<T>& inputValues,
- std::vector<T>& expectedOutputValues,
- std::vector<int32_t>& beginTensorData,
- std::vector<int32_t>& endTensorData,
- std::vector<int32_t>& strideTensorData,
- std::vector<int32_t>& inputTensorShape,
- std::vector<int32_t>& beginTensorShape,
- std::vector<int32_t>& endTensorShape,
- std::vector<int32_t>& strideTensorShape,
- std::vector<int32_t>& outputTensorShape,
- const int32_t beginMask = 0,
- const int32_t endMask = 0,
- const int32_t ellipsisMask = 0,
- const int32_t newAxisMask = 0,
- const int32_t ShrinkAxisMask = 0,
- const armnn::DataLayout& dataLayout = armnn::DataLayout::NHWC)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateStridedSliceTfLiteModel(
- ::tflite::TensorType_FLOAT32,
- inputTensorShape,
- beginTensorData,
- endTensorData,
- strideTensorData,
- beginTensorShape,
- endTensorShape,
- strideTensorShape,
- outputTensorShape,
- beginMask,
- endMask,
- ellipsisMask,
- newAxisMask,
- ShrinkAxisMask,
- dataLayout);
-
- auto tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegate) == kTfLiteOk);
- CHECK(armnnDelegate != nullptr);
- CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteDelegate;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteDelegate) == kTfLiteOk);
- CHECK(tfLiteDelegate != nullptr);
- CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
-    // Modify the ArmNN interpreter to use the ArmNN delegate
- CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
- CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
- armnnDelegate,
- outputTensorShape,
- expectedOutputValues);
-
- tfLiteDelegate.reset(nullptr);
- armnnDelegate.reset(nullptr);
-} // End of StridedSlice Test
-
-} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
deleted file mode 100644
index 9dce4461da..0000000000
--- a/delegate/src/test/TestUtils.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "TestUtils.hpp"
-
-namespace armnnDelegate
-{
-
-void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
-{
- auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(compareBool(tensor1[i], tensor2[i]));
- }
-}
-
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize)
-{
- auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(compareBool(tensor1[i], tensor2[i]));
- }
-}
-
-void CompareData(float tensor1[], float tensor2[], size_t tensorSize)
-{
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(tensor1[i] == doctest::Approx( tensor2[i] ));
- }
-}
-
-void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance)
-{
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <=
- std::abs(tensor1[i]*percentTolerance/100));
- }
-}
-
-void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize)
-{
- uint8_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
- }
-}
-
-void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize)
-{
- int16_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
- }
-}
-
-void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize)
-{
- int32_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
- }
-}
-
-void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize)
-{
- int8_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
- }
-}
-
-void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize)
-{
- for (size_t i = 0; i < tensorSize; i++)
- {
- CHECK(tensor1[i] == doctest::Approx( tensor2[i] ));
- }
-}
-
-void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize)
-{
- uint16_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- uint16_t tensor1Data = tensor1[i].data;
- uint16_t tensor2Data = tensor2[i].data;
- CHECK(std::max(tensor1Data, tensor2Data) - std::min(tensor1Data, tensor2Data) <= tolerance);
- }
-}
-
-void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize)
-{
- uint16_t tolerance = 1;
- for (size_t i = 0; i < tensorSize; i++)
- {
- uint16_t tensor1Data = tensor1[i].data;
- uint16_t tensor2Data = half_float::detail::float2half<std::round_indeterminate, float>(tensor2[i]);
- CHECK(std::max(tensor1Data, tensor2Data) - std::min(tensor1Data, tensor2Data) <= tolerance);
- }
-}
-
-template <>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
- std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<Half>& expectedOutputValues,
- unsigned int outputIndex)
-{
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
- auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
- auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
-
- CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
- CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
-
- for (size_t i = 0; i < expectedOutputShape.size(); i++)
- {
- CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
- }
-
- armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-template <>
-void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
-{
- auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelegateInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelegateInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
- }
-}
-
-} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
deleted file mode 100644
index 5d4a0ed7d4..0000000000
--- a/delegate/src/test/TestUtils.hpp
+++ /dev/null
@@ -1,101 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/interpreter.h>
-
-#include <doctest/doctest.h>
-
-#include <half/half.hpp>
-
-using Half = half_float::half;
-
-namespace armnnDelegate
-{
-
-/// Can be used to assign input data from a vector to a model input.
-/// Example usage can be found in ResizeTestHelper.hpp
-template <typename T>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
-{
- auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelegateInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelegateInputData[i] = inputValues[i];
- }
-}
-
-template <>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues);
-
-/// Can be used to compare bool data coming from a tflite interpreter
-/// Boolean types get converted to a bit representation in a vector. vector.data() returns a void pointer
-/// instead of a pointer to bool, so a dedicated overload is needed to compare against a vector of bool.
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize);
-void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
-
-/// Can be used to compare float data coming from a tflite interpreter, using doctest::Approx with its default tolerance
-void CompareData(float tensor1[], float tensor2[], size_t tensorSize);
-
-/// Can be used to compare float data coming from a tflite interpreter with a given percentage tolerance
-void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance);
-
-/// Can be used to compare int8_t data coming from a tflite interpreter with a tolerance of 1
-void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize);
-
-/// Can be used to compare uint8_t data coming from a tflite interpreter with a tolerance of 1
-void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize);
-
-/// Can be used to compare int16_t data coming from a tflite interpreter with a tolerance of 1
-void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize);
-
-/// Can be used to compare int32_t data coming from a tflite interpreter with a tolerance of 1
-void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize);
-
-/// Can be used to compare Half (Float16) data, using doctest::Approx with its default tolerance
-void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize);
-
-/// Can be used to compare TfLiteFloat16 data coming from a tflite interpreter
-void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize);
-
-/// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
-void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
-
-/// Can be used to compare the output tensor shape and values
-/// from armnnDelegateInterpreter and tfLiteInterpreter.
-/// Example usage can be found in ControlTestHelper.hpp
-template <typename T>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
- std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& expectedOutputValues,
- unsigned int outputIndex = 0)
-{
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
- auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
- auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
- CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
- CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
-
- for (size_t i = 0; i < expectedOutputShape.size(); i++)
- {
- CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
- CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
- }
-
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData , expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData , expectedOutputValues.data(), expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData , armnnDelegateOutputData , expectedOutputValues.size());
-}
-
-} // namespace armnnDelegate
diff --git a/delegate/src/test/TransposeTest.cpp b/delegate/src/test/TransposeTest.cpp
deleted file mode 100644
index 67751e325a..0000000000
--- a/delegate/src/test/TransposeTest.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "TransposeTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <doctest/doctest.h>
-#include <flatbuffers/flatbuffers.h>
-
-namespace armnnDelegate
-{
-
-TEST_SUITE ("Transpose_GpuAccTests")
-{
-
-TEST_CASE ("Transpose_Float32_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- TransposeFP32Test(backends);
-}
-
-}
-
-TEST_SUITE ("Transpose_CpuAccTests")
-{
-
-TEST_CASE ("Transpose_Float32_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- TransposeFP32Test(backends);
-}
-
-}
-
-TEST_SUITE ("Transpose_CpuRefTests")
-{
-TEST_CASE ("Transpose_Float32_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- TransposeFP32Test(backends);
-}
-}
-} // namespace armnnDelegate
diff --git a/delegate/src/test/TransposeTestHelper.hpp b/delegate/src/test/TransposeTestHelper.hpp
deleted file mode 100644
index 4479c486cb..0000000000
--- a/delegate/src/test/TransposeTestHelper.hpp
+++ /dev/null
@@ -1,177 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
- const std::vector <int32_t>& input0TensorShape,
- const std::vector <int32_t>& inputPermVecShape,
- const std::vector <int32_t>& outputTensorShape,
- const std::vector<int32_t>& inputPermVec)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- flatbuffers::Offset<tflite::Buffer> buffers[4]{
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder),
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
- sizeof(int32_t) * inputPermVec.size())),
- CreateBuffer(flatBufferBuilder)
- };
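-    // Buffer 2 holds the constant permutation vector referenced by tensor 1; the input and output
-    // tensors use empty buffers.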
- std::array<flatbuffers::Offset<Tensor>, 3> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
- input0TensorShape.size()),
- tensorType, 1);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
- inputPermVecShape.size()),
- tflite::TensorType_INT32, 2,
- flatBufferBuilder.CreateString("permutation_vector"));
- tensors[2] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,3);
- const std::vector<int32_t> operatorInputs{0, 1};
- const std::vector<int32_t> operatorOutputs{2};
- flatbuffers::Offset <Operator> transposeOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- BuiltinOptions_TransposeOptions,
- CreateTransposeOptions(flatBufferBuilder).Union());
- const std::vector<int> subgraphInputs{0, 1};
- const std::vector<int> subgraphOutputs{2};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&transposeOperator, 1));
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_TRANSPOSE);
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers, 4));
- flatBufferBuilder.Finish(flatbufferModel);
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
-{
- using namespace tflite;
-
- // set test input data
- std::vector<int32_t> input0Shape {4, 2, 3};
- std::vector<int32_t> inputPermVecShape {3};
-    std::vector<int32_t> outputShape {3, 4, 2};
-
- std::vector<float> input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
- std::vector<int32_t> inputPermVec = {2, 0, 1};
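-    // With perm = {2, 0, 1} each output axis i takes input axis perm[i], so the {4, 2, 3} input transposes to a {3, 4, 2} output.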
- std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
- 13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
-
- // create model
- std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
- input0Shape,
- inputPermVecShape,
- outputShape,
- inputPermVec);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data for tflite
- auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
- auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- tfLiteInterpreterInput0Data[i] = input0Values[i];
- }
-
- auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
- auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
- for (unsigned int i = 0; i < inputPermVec.size(); ++i)
- {
- tfLiteInterpreterInput1Data[i] = inputPermVec[i];
- }
-
-    // Set input data for armnn delegate
- auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
- for (unsigned int i = 0; i < input0Values.size(); ++i)
- {
- armnnDelegateInput0Data[i] = input0Values[i];
- }
-
- auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
- auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
- for (unsigned int i = 0; i < inputPermVec.size(); ++i)
- {
- armnnDelegateInput1Data[i] = inputPermVec[i];
- }
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- for (size_t i = 0; i < expectedOutputValues.size(); ++i)
- {
- CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
- CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
- CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
- }
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-}
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTest.cpp b/delegate/src/test/UnidirectionalSequenceLstmTest.cpp
deleted file mode 100644
index 4bee715788..0000000000
--- a/delegate/src/test/UnidirectionalSequenceLstmTest.cpp
+++ /dev/null
@@ -1,1464 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "UnidirectionalSequenceLstmTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- //tensorInfo12,
- bool hasInputToInputWeights = true;
- std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
- -0.117484632f, 0.3298470976f, -0.1179017122f,
- 0.214305695f, 0.42135173085f, 0.003878414626f,
- -0.348303917f, -0.1881275477f, 0.0343011027f };
-
- std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
- -0.3810434485f, 0.268383264f, -0.009807467424f,
- -0.3522925403f, -0.24275735512f, -0.28344226125f,
- 0.13512269116f, -0.4932442977f, -0.10039821991f };
-
- std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
- 0.386399507f, -0.259465157985f, -0.16545993089f,
- -0.4230232555f, 0.341664791103f, -0.18127849691f,
- -0.2277662414f, -0.55275535589f, 0.34184026718f };
-
- std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
- 0.53969591851f, 0.23393625035f, -0.27140527306f,
- 0.50009280443f, 0.07511717046f, 0.3998299249f,
- -0.51717478049f, 0.1889653282f, -0.367323637f };
-
- //tensorInfo16,
- bool hasRecurrentToInputWeights = true;
- std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
- -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
- 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
- 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };
-
- std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
- -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
- -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
- -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
-
- std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
- -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
- 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
- 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
-
- std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
- -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
- 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
- -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<float> cellToInputWeights;
- bool hasCellToForgetWeights = false;
- std::vector<float> cellToForgetWeights;
- bool hasCellToOutputWeights = false;
- std::vector<float> cellToOutputWeights;
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = {0., 0., 0., 0.};
- std::vector<float> forgetGateBias = {1., 1., 1., 1.};
- std::vector<float> cellBias = {0., 0., 0., 0.};
- std::vector<float> outputGateBias = {0., 0., 0., 0.};
-
- bool hasProjectionWeights = false;
- std::vector<float> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2. };
- std::vector<float> expectedOutputValues = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
- -0.168107f, -0.414129f, -0.549875f, -0.00803579f,
- -0.0668735f, 0.204078f, -0.42765f, -0.0312321f,
- -0.120003f, -0.0941918f, -0.456391f, -0.0287019f,
- -0.0342921f, 0.20824f, -0.656989f, -0.00415265f,
- -0.10493f, 0.14211f, -0.583478f, -0.0329754f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor);
-}
-
-void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- std::vector<int32_t> inputShape = {timeSize, batchSize, inputSize};
- std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
- std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
-
- std::vector<int32_t> outputTensorInfo = {timeSize, batchSize, outputSize};
-
- //tensorInfo12
- bool hasInputToInputWeights = true;
- std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
- 0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
- 0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
- -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };
-
- std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
- -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
- -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
- -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };
-
- std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
- 0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
- 0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
- -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };
-
- std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
- -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
- 0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
- -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };
-
- //tensorInfo16
- bool hasRecurrentToInputWeights = true;
- std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
- -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
- -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
- 0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };
-
- std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
- 0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
- -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
- 0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };
-
- std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
- -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
- -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
- -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };
-
- std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
- -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
- 0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
- 0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<float> cellToInputWeights;
- bool hasCellToForgetWeights = false;
- std::vector<float> cellToForgetWeights;
- bool hasCellToOutputWeights = false;
- std::vector<float> cellToOutputWeights;
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = {0., 0., 0., 0.};
- std::vector<float> forgetGateBias = {1., 1., 1., 1.};
- std::vector<float> cellBias = {0., 0., 0., 0.};
- std::vector<float> outputGateBias = {0., 0., 0., 0.};
-
- bool hasProjectionWeights = false;
- std::vector<float> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2. };
- std::vector<float> expectedOutputValues = { 0.135658f, 0.124673f, 0.021209f, -0.0530204f,
- 0.106138f, 0.0404792f, 0.0151644f, -0.00675166f,
- -0.0128514f, 0.0644884f, 0.0709072f, -0.0454045f,
- 0.162886f, 0.166494f, 0.0277046f, -0.0369807f,
- 0.111716f, 0.043119f, 0.0762981f, -0.0122854f,
- 0.104397f, 0.2144f, 0.119192f, -0.0839058f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = true;
-
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor);
-}
-
-void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 2;
- int32_t timeSize = 3;
- int32_t inputSize = 4;
- int32_t outputSize = 5;
- int32_t numUnits = 6;
-
- std::vector<int32_t> inputShape = {batchSize, timeSize, inputSize};
- std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
- std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
-
- std::vector<int32_t> outputTensorInfo = {batchSize, timeSize, outputSize};
-
- //tensorInfoInputSize,
- bool hasInputToInputWeights = true;
- std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
- -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
- -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
- -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
- -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
- -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };
-
- std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
- 0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
- 0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
- -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
- -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
- 0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
-
- std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
- -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
- -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
- -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
- -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
- 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };
-
- std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
- -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
- -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
- 0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
- 0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
- -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };
-
- //tensorInfoOutputSize,
- bool hasRecurrentToInputWeights = true;
- std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
- -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
- -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
- -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
- 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
- 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
- -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
- 0.14283475f, -0.07390571f };
-
- std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
- 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
- -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
- 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
- 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
- -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
- -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
- 0.061878487f, -0.04729229f };
-
- std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
- 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
- 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
- -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
- 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
- 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
- -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
- -0.019443132f, -0.030755889f };
-
- std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
- -0.045984812f,-0.01255415f, -0.0026479573f,
- -0.08196161f, -0.054914974f, -0.0046604523f,
- -0.029587349f, -0.044576716f, -0.07480124f,
- -0.082868785f, 0.023254942f, 0.027502948f,
- -0.0039728214f, -0.08683098f, -0.08116779f,
- -0.014675607f, -0.037924774f, -0.023314456f,
- -0.007401714f, -0.09255757f, 0.029460307f,
- -0.08829125f, -0.005139627f, -0.08989442f,
- -0.0555066f, 0.13596267f, 0.025062224f };
- // tensorInfoNumUnits
- bool hasCellToInputWeights = true;
- std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
- 0.018586371f, -0.037586458f, -0.15312155f };
- bool hasCellToForgetWeights = true;
- std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
- -0.012770197f, 0.041331276f, -0.072311886f };
- bool hasCellToOutputWeights = true;
- std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
- 0.002913762f, 0.17764764f, -0.5495371f };
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
- 0.10380666f, 0.053110216f, -0.06928846f };
- std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
- 0.23027696f, 0.11098921f, 0.08989442f };
- std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
- 0.033463873f, -0.1483596f, 0.029460307f };
- std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
- 0.12648113f, 0.027195795f, 0.35373217f };
-
- bool hasProjectionWeights = true;
- std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
- 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
- -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
- -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
- 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
- 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };
-
- bool hasProjectionBias = true;
- std::vector<float> projectionBias(outputSize, 0.f);
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2.,
- 1., 2., 3., 4., 5., 4.};
- std::vector<float> expectedOutputValues = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
- -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
- -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
- 0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
- -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
- -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0126895f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor);
-}
-
-void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- //tensorInfo12
- bool hasInputToInputWeights = false;
- std::vector<float> inputToInputWeights{};
-
- std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
- -0.3810434485f, 0.268383264f, -0.009807467424f,
- -0.3522925403f, -0.24275735512f, -0.28344226125f,
- 0.13512269116f, -0.4932442977f, -0.10039821991f };
-
- std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
- 0.386399507f, -0.259465157985f, -0.16545993089f,
- -0.4230232555f, 0.341664791103f, -0.18127849691f,
- -0.2277662414f, -0.55275535589f, 0.34184026718f };
-
- std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
- 0.53969591851f, 0.23393625035f, -0.27140527306f,
- 0.50009280443f, 0.07511717046f, 0.3998299249f,
- -0.51717478049f, 0.1889653282f, -0.367323637f };
-
- //tensorInfo16
- bool hasRecurrentToInputWeights = false;
- std::vector<float> recurrentToInputWeights{};
-
- std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
- -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
- -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
- -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
-
- std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
- -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
- 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
- 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
-
- std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
- -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
- 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
- -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<float> cellToInputWeights;
- bool hasCellToForgetWeights = true;
- std::vector<float> cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
- bool hasCellToOutputWeights = true;
- std::vector<float> cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
-
- bool hasInputGateBias = false;
- std::vector<float> inputGateBias;
- std::vector<float> forgetGateBias = {1., 1., 1., 1.};
- std::vector<float> cellBias = {0., 0., 0., 0.};
- std::vector<float> outputGateBias = {0., 0., 0., 0.};
-
- bool hasProjectionWeights = false;
- std::vector<float> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2. };
- std::vector<float> expectedOutputValues = { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
- -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
- -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
- -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
- -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
- -0.031675f, 0.125987f, -0.526695f, -0.110093f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor);
-}
-
-void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(
- std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- int32_t numUnits = 5;
-
- //tensorInfo15
- bool hasInputToInputWeights = true;
- std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
- -0.117484632f, 0.3298470976f, -0.1179017122f,
- 0.214305695f, 0.42135173085f, 0.003878414626f,
- -0.348303917f, -0.1881275477f, 0.0343011027f,
- -0.38837709614f, -0.05636804124f, 0.4259087456f};
-
- std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
- -0.3810434485f, 0.268383264f, -0.009807467424f,
- -0.3522925403f, -0.24275735512f, -0.28344226125f,
- 0.13512269116f, -0.4932442977f, -0.10039821991f,
- 0.2726137042f, 0.09216640889f, -0.06551410215f};
-
- std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
- 0.386399507f, -0.259465157985f, -0.16545993089f,
- -0.4230232555f, 0.341664791103f, -0.18127849691f,
- -0.2277662414f, -0.55275535589f, 0.34184026718f,
- 0.3954237699f, -0.19407111404f, 0.30412107706f};
-
- std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
- 0.53969591851f, 0.23393625035f, -0.27140527306f,
- 0.50009280443f, 0.07511717046f, 0.3998299249f,
- -0.51717478049f, 0.1889653282f, -0.367323637f,
- -0.12584099173f, -0.12319286912f, 0.2407919466f};
-
- //tensorInfo20
- bool hasRecurrentToInputWeights = true;
- std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
- -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
- 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
- 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
- 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
-
- std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
- -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
- -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
- -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
- 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
-
- std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
- -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
- 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
- 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
- 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
-
- std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
- -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
- 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
- -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
- 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
- // tensorInfo5
- bool hasCellToInputWeights = true;
- std::vector<float> cellToInputWeights = { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
- bool hasCellToForgetWeights = true;
- std::vector<float> cellToForgetWeights = { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
- bool hasCellToOutputWeights = true;
- std::vector<float> cellToOutputWeights = { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
- std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
- std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
- std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
-
- bool hasProjectionWeights = true;
- std::vector<float> projectionWeights = { -0.1f, 0.2f, 0.01f, -0.2f,
- 0.1f, 0.5f, 0.3f, 0.08f,
- 0.07f, 0.2f, -0.4f, 0.2f,
- 0.5f, -0.4f, 0.3f, -0.2f,
- 0.3f, 0.08f, -0.07f, 0.2f}; //{outputSize, numUnits}
- bool hasProjectionBias = true;
-    std::vector<float> projectionBias(outputSize, 0.f);
-
- bool hasInputLayerNormWeights = true;
- std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
- bool hasForgetLayerNormWeights = true;
- std::vector<float> forgetLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
- bool hasCellLayerNormWeights = true;
- std::vector<float> cellLayerNormWeights = { 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
- bool hasOutputLayerNormWeights = true;
- std::vector<float> outputLayerNormWeights = { 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
-
- std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2. };
- std::vector<float> expectedOutputValues = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
- 0.11458f, 0.0407109f, 0.300327f, 0.174301f,
- 0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
- 0.108008f, 0.0386623f, 0.273471f, 0.167115f,
- 0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
- 0.106649f, 0.0276847f, 0.229863f, 0.166958f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<float>(backends,
- ::tflite::TensorType_FLOAT32,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor);
-}
-
-void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- //tensorInfo12
- bool hasInputToInputWeights = true;
- std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
-
- std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
-
- std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
-
- std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
-
- //tensorInfo16
- bool hasRecurrentToInputWeights = true;
- std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
-
- std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
-
- std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
-
- std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
-
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<int8_t> cellToInputWeights;
- bool hasCellToForgetWeights = false;
- std::vector<int8_t> cellToForgetWeights;
- bool hasCellToOutputWeights = false;
- std::vector<int8_t> cellToOutputWeights;
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0., 0., 0., 0. };
- std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
- std::vector<float> cellBias = { 0., 0., 0., 0. };
- std::vector<float> outputGateBias = { 0., 0., 0., 0. };
-
- bool hasProjectionWeights = false;
- std::vector<int8_t> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
- 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
- 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
-
- std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120569f, -0.0116868f,
- -0.0350714f, -0.0343202f, -0.047504f, -0.0569789f,
- -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
- -0.0294759f, -0.0129935f, -0.0444175f, -0.0444354f,
- -0.0280855f, 0.00545101f, -0.051422f, -0.0463838f,
- -0.0310702f, 0.00915739f, -0.0625207f, -0.0482648f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
- 0.1f);
-}
-
-void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- //tensorInfo12
- bool hasInputToInputWeights = true;
- std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
-
- std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
-
- std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
-
- std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
-
- //tensorInfo16
- bool hasRecurrentToInputWeights = true;
- std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
-
- std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
-
- std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
-
- std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
-
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<int8_t> cellToInputWeights;
- bool hasCellToForgetWeights = false;
- std::vector<int8_t> cellToForgetWeights;
- bool hasCellToOutputWeights = false;
- std::vector<int8_t> cellToOutputWeights;
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0., 0., 0., 0. };
- std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
- std::vector<float> cellBias = { 0., 0., 0., 0. };
- std::vector<float> outputGateBias = { 0., 0., 0., 0. };
-
- bool hasProjectionWeights = false;
- std::vector<int8_t> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
- 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
- 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
-
- std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120122f, -0.0116868f,
- -0.0261295f, -0.0188487f, -0.0345463f, -0.049733f,
- -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
- -0.0291863f, -0.0369402f, -0.0354071f, -0.0296529f,
- -0.0419539f, -0.00617731f, -0.0814796f, -0.0804005f,
- -0.0244737f, 0.0119905f, -0.0457527f, -0.0331862f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = true;
-
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
-                                               0.1f);
-}
-
-void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- int32_t numUnits = 4;
-
- bool hasInputToInputWeights = true;
- std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
-
- std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
-
- std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
-
- std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
-
- //tensorInfo16
- bool hasRecurrentToInputWeights = true;
- std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
-
- std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
-
- std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
-
- std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
-
- // tensorInfo4
- bool hasCellToInputWeights = true;
- std::vector<int8_t> cellToInputWeights = { 5, 10, 25, 15 };
- bool hasCellToForgetWeights = true;
- std::vector<int8_t> cellToForgetWeights = { -5, 15, 25, 3 };
- bool hasCellToOutputWeights = true;
- std::vector<int8_t> cellToOutputWeights = { 10, -10, -5, 50 };
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f};
- std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.3032477f, 0.23027696f};
- std::vector<float> cellBias = { -0.124379363f, 0.55531194f, 0.23377132f, 0.033463873f };
- std::vector<float> outputGateBias = { 0.046159424f, -0.12809046f, 0.03563469f, 0.12648113f };
-
- bool hasProjectionWeights = true;
- std::vector<int8_t> projectionWeights = { -25, 51, 3, -5, 25, 127, 77, 20, 18, 51, -10, 51, -25, 88, 77, -13 };
- bool hasProjectionBias = true;
- std::vector<float> projectionBias(outputSize, 0.f);
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
- 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
- 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
-
- std::vector<float> expectedOutputValues = { 0.612103f, 1.56788f, 0.31966f, 1.42956f,
- 0.909718f, 3.07916f, -0.560586f, 3.8907f,
- 0.753671f, 1.77485f, 0.365122f, 1.60077f,
- 0.812644f, 2.79092f, -0.605396f, 3.61742f,
- 0.791857f, 1.64353f, 0.316588f, 1.55192f,
- 0.807265f, 2.47012f, -0.539598f, 3.25654f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
- 0.1f);
-}
-
-void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- // cellSize and outputSize have the same size when there is no projection.
- int32_t numUnits = outputSize;
-
- //tensorInfo12,
- bool hasInputToInputWeights = false;
- std::vector<int8_t> inputToInputWeights;
-
- std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
-
- std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
-
- std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
-
- //tensorInfo16,
- bool hasRecurrentToInputWeights = false;
- std::vector<int8_t> recurrentToInputWeights;
- std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
-
- std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
-
- std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
-
- // tensorInfo4
- bool hasCellToInputWeights = false;
- std::vector<int8_t> cellToInputWeights;
- bool hasCellToForgetWeights = true;
- std::vector<int8_t> cellToForgetWeights = { 47, -52, -24, 31 };
- bool hasCellToOutputWeights = true;
- std::vector<int8_t> cellToOutputWeights = { -17, 82, 85, -77 };
-
- bool hasInputGateBias = false;
- std::vector<float> inputGateBias;
- std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
- std::vector<float> cellBias = { 0., 0., 0., 0. };
- std::vector<float> outputGateBias = { 0., 0., 0., 0. };
-
- bool hasProjectionWeights = false;
- std::vector<int8_t> projectionWeights;
- bool hasProjectionBias = false;
- std::vector<float> projectionBias;
-
- bool hasInputLayerNormWeights = false;
- std::vector<float> inputLayerNormWeights;
- bool hasForgetLayerNormWeights = false;
- std::vector<float> forgetLayerNormWeights;
- bool hasCellLayerNormWeights = false;
- std::vector<float> cellLayerNormWeights;
- bool hasOutputLayerNormWeights = false;
- std::vector<float> outputLayerNormWeights;
-
- std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
- 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
- 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
-
- std::vector<float> expectedOutputValues = { -0.0072104f, -0.00991171f, -0.00650478f, -0.00713055f,
- -0.0191782f, -0.0161269f, -0.0233683f, -0.054299f,
- -0.00783725f, 0.00635271f, -0.0126718f, -0.022613f,
- -0.0161351f, -0.00775868f, -0.021054f, -0.0339778f,
- -0.0146392f, 0.00330261f, -0.0258733f, -0.0407797f,
- -0.0174297f, 0.0050105f, -0.0266275f, -0.0362564f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
-                                               0.1f);
-}
-
-void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
- std::vector<armnn::BackendId>& backends)
-{
- int32_t batchSize = 3;
- int32_t timeSize = 2;
- int32_t inputSize = 3;
- int32_t outputSize = 4;
- int32_t numUnits = 5;
-
- bool hasInputToInputWeights = true;
- std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3, 2, 2, -4 };
-
- std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1, -3, -2, -4 };
-
- std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3, 2, 5, -4 };
-
- std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4, -4, -1, -1 };
-
- bool hasRecurrentToInputWeights = true;
- std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
- 5, -1, 1, 3, -1, -1, -1, 4, 2, 3 };
-
- std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
- 5, -1, 1, 3, -2, -1, -1, 2, 2, 1 };
-
- std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2,
- 1, 2, 3, -2, 3, -3, -1, -5, 1, 3 };
-
- std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3,
- -4, -1, -1, -1, 2, -1, 5, 1, -3, -4 };
-
- // tensorInfo5
- bool hasCellToInputWeights = true;
- std::vector<int8_t> cellToInputWeights = { 5, 3, 8, -5, 2 };
- bool hasCellToForgetWeights = true;
- std::vector<int8_t> cellToForgetWeights = { -2, -7, 5, -3, 4 };
- bool hasCellToOutputWeights = true;
-    std::vector<int8_t> cellToOutputWeights = { 9, -10, -5, 5, 1 };
-
- bool hasInputGateBias = true;
- std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
- std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
- std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
- std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
-
- bool hasProjectionWeights = true;
- std::vector<int8_t> projectionWeights = { -1, 2, 1, -2, 1, 5, 3, 8, 7, 2,
- -4, 2, 5, -4, 3, -2, 3, 8, -7, 2 };
- bool hasProjectionBias = true;
- std::vector<float> projectionBias(outputSize, 0.f);
-
- bool hasInputLayerNormWeights = true;
- std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, -0.3f, -0.1f, 0.5f };
- bool hasForgetLayerNormWeights = true;
- std::vector<float> forgetLayerNormWeights = { -0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
- bool hasCellLayerNormWeights = true;
- std::vector<float> cellLayerNormWeights = { 0.5f, 0.2f, 0.3f, 0.4f, -0.5f };
- bool hasOutputLayerNormWeights = true;
- std::vector<float> outputLayerNormWeights = { 0.6f, -0.2f, -0.2f, 0.5f, 0.1f };
-
- std::vector<float> inputValues = { 1., 8., 3., 4., 5., 4.,
- 3., 2., 1., 2., 3., 4.,
- 5., 4., 3., 2., 1., 2. };
-
- std::vector<float> expectedOutputValues = { 0.0471276f, 0.0168155f, 0.0789885f, 0.16550f,
- 0.0643133f, -0.0400722f, 0.100593f, 0.197722f,
- 0.0465562f, -0.0600682f, 0.0622087f, 0.115053f,
- 0.056287f, -0.0566218f, 0.0856832f, 0.148484f,
- 0.0457859f, -0.0588112f, 0.0623636f, 0.114333f,
- 0.0509271f, -0.0754262f, 0.058600f, 0.0801288f };
-
- tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
- float clippingThresCell = 10.f;
- float clippingThresProj = 0.f;
- bool isTimeMajor = false;
-
- UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
- ::tflite::TensorType_INT8,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- inputValues,
- expectedOutputValues,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
- 0.1);
-}
-
-TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
-{
-
-TEST_CASE ("UnidirectionalSequenceLstmTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmTimeMajorTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmInt8Test_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmInt8Test(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmTimeInt8TimeMajorTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmInt8TimeMajorTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(backends);
-}
-
-TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
-{
- std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
-}
-
-} // End of TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
-
-} // namespace armnnDelegate \ No newline at end of file
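Note on the Int8 variants above: they pass a weight quantization scale of 0.1 (zero point 0) as the final argument to UnidirectionalSequenceLstmTestImpl. A minimal, self-contained sketch of the implied mapping between the stored int8 weights and the real values they represent, using the standard affine relation real = scale * (q - zeroPoint); the sample values are the first few inputToInputWeights from the test and are reproduced here purely for illustration:

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const float quantScale  = 0.1f; // value passed to UnidirectionalSequenceLstmTestImpl in the Int8 tests
    const int   quantOffset = 0;    // zero point used by the helper's weightQuantizationParameters
    const std::vector<int8_t> weights = { -4, -1, -1, -2, 3 }; // first few inputToInputWeights

    for (int8_t q : weights)
    {
        // Affine dequantization: real = scale * (quantized - zeroPoint)
        std::printf("%3d -> %.1f\n", static_cast<int>(q), quantScale * (static_cast<int>(q) - quantOffset));
    }
    return 0;
}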
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
deleted file mode 100644
index 10555aca1a..0000000000
--- a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ /dev/null
@@ -1,742 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
-
-#include <doctest/doctest.h>
-
-#include <armnn/utility/IgnoreUnused.hpp>
-#include <armnn/utility/NumericCast.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/Types.hpp>
-
-#include <initializer_list>
-#include <iterator>
-#include <vector>
-
-namespace
-{
-
-template<typename T>
-std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType tensorType,
- int32_t batchSize,
- int32_t timeSize,
- int32_t inputSize,
- int32_t outputSize,
- int32_t numUnits,
- bool hasInputToInputWeights,
- const std::vector<T>& inputToInputWeights,
- const std::vector<T>& inputToForgetWeights,
- const std::vector<T>& inputToCellWeights,
- const std::vector<T>& inputToOutputWeights,
- bool hasRecurrentToInputWeights,
- const std::vector<T>& recurrentToInputWeights,
- const std::vector<T>& recurrentToForgetWeights,
- const std::vector<T>& recurrentToCellWeights,
- const std::vector<T>& recurrentToOutputWeights,
- bool hasCellToInputWeights,
- const std::vector<T>& cellToInputWeights,
- bool hasCellToForgetWeights,
- const std::vector<T>& cellToForgetWeights,
- bool hasCellToOutputWeights,
- const std::vector<T>& cellToOutputWeights,
- bool hasInputGateBias,
- const std::vector<float>& inputGateBias,
- const std::vector<float>& forgetGateBias,
- const std::vector<float>& cellBias,
- const std::vector<float>& outputGateBias,
- bool hasProjectionWeights,
- const std::vector<T>& projectionWeights,
- bool hasProjectionBias,
- const std::vector<float>& projectionBias,
- bool hasInputLayerNormWeights,
- const std::vector<float>& inputLayerNormWeights,
- bool hasForgetLayerNormWeights,
- const std::vector<float>& forgetLayerNormWeights,
- bool hasCellLayerNormWeights,
- const std::vector<float>& cellLayerNormWeights,
- bool hasOutputLayerNormWeights,
- const std::vector<float>& outputLayerNormWeights,
- tflite::ActivationFunctionType activationFunction,
- float clippingThresCell,
- float clippingThresProj,
- bool isTimeMajor,
- float quantScale,
- int quantOffset = 0)
-{
-
- std::vector<int32_t> tensorInfo0{};
- std::vector<int32_t> tensorInfoNumUnits{numUnits};
- std::vector<int32_t> tensorInfoInputSize{numUnits, inputSize};
- std::vector<int32_t> tensorInfoOutputSize{numUnits, outputSize};
-
- std::vector<int32_t> inputShape;
- std::vector<int32_t> outputShape;
- if (isTimeMajor)
- {
- inputShape = {timeSize, batchSize, inputSize};
- outputShape = {timeSize, batchSize, outputSize};
- }
- else
- {
- inputShape = {batchSize, timeSize, inputSize};
- outputShape = {batchSize, timeSize, outputSize};
- }
- std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
- std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
- std::vector<int32_t> projectionWeightDimensions{outputSize, numUnits};
- std::vector<int32_t> projectionBiasDimensions{outputSize};
-
- std::vector<int> operatorInputs;
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- std::vector<flatbuffers::Offset<Tensor>> tensors;
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({1.0f}),
- flatBufferBuilder.CreateVector<int64_t>({0}));
-
- auto weightQuantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({quantScale}),
- flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
- inputShape.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("input_0")));
- operatorInputs.push_back(tensors.size() - 1);
-
- if (hasInputToInputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
- sizeof(T) * inputToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
- tensorInfoInputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToInputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
- sizeof(T) * inputToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
- tensorInfoInputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToForgetWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
- sizeof(T) * inputToCellWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
- tensorInfoInputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToCellWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
- sizeof(T) * inputToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
- tensorInfoInputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputToOutputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- if (hasRecurrentToInputWeights)
- {
- buffers.push_back(CreateBuffer(
- flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
- sizeof(T) * recurrentToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
- tensorInfoOutputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToInputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- recurrentToForgetWeights.data()),
- sizeof(T) * recurrentToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
- tensorInfoOutputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToForgetWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- recurrentToCellWeights.data()),
- sizeof(T) * recurrentToCellWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
- tensorInfoOutputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToCellWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- recurrentToOutputWeights.data()),
- sizeof(T) * recurrentToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
- tensorInfoOutputSize.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("recurrentToOutputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
-
- if (hasCellToInputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- cellToInputWeights.data()),
- sizeof(T) * cellToInputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToInputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellToForgetWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- cellToForgetWeights.data()),
- sizeof(T) * cellToForgetWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToForgetWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellToOutputWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- cellToOutputWeights.data()),
- sizeof(T) * cellToOutputWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellToOutputWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasInputGateBias)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
- sizeof(float) * inputGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputGateBias")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
- sizeof(float) * forgetGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("forgetGateBias")));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellBias.data()),
- sizeof(float) * cellBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellBias")));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputGateBias.data()),
- sizeof(float) * outputGateBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputGateBias")));
- operatorInputs.push_back(tensors.size() - 1);
-
- if (hasProjectionWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(projectionWeights.data()),
- sizeof(T) * projectionWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
- projectionWeightDimensions.size()),
- tensorType,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("projectionWeights"),
- weightQuantizationParameters));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasProjectionBias)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(projectionBias.data()),
- sizeof(float) * projectionBias.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
- projectionBiasDimensions.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("projectionBias")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
- outputStateInDimensions.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputStateInInfo"),
- quantizationParameters,
- true));
- operatorInputs.push_back(tensors.size() - 1);
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
- cellStateInDimensions.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellStateInInfo"),
- quantizationParameters,
- true));
- operatorInputs.push_back(tensors.size() - 1);
-
- if (hasInputLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
- sizeof(float) * inputLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("inputLayerNormWeights")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasForgetLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
- sizeof(float) * forgetLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("forgetLayerNormWeights")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasCellLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
- cellLayerNormWeights.data()),
- sizeof(float) * cellLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("cellLayerNormWeights")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
-
- if (hasOutputLayerNormWeights)
- {
- buffers.push_back(
- CreateBuffer(flatBufferBuilder,
- flatBufferBuilder.CreateVector(
- reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
- sizeof(float) * outputLayerNormWeights.size())));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
- tensorInfoNumUnits.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("outputLayerNormWeights")));
- operatorInputs.push_back(tensors.size() - 1);
- }
- else
- {
- operatorInputs.push_back(kTfLiteOptionalTensor);
- }
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- tensors.push_back(CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
- outputShape.size()),
- ::tflite::TensorType_FLOAT32,
- buffers.size() - 1,
- flatBufferBuilder.CreateString("output")));
- std::vector<int> operatorOutputs;
- operatorOutputs.push_back(tensors.size() - 1);
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor).Union();
-
- flatbuffers::Offset<Operator> lstmOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
- operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- operatorBuiltinOptionsType, operatorBuiltinOptions);
-
- flatbuffers::Offset<SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
- operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
- operatorOutputs.size()),
- flatBufferBuilder.CreateVector(&lstmOperator, 1));
-
- flatbuffers::Offset<flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString(
- "ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
- flatbuffers::Offset<OperatorCode> operatorCode =
- CreateOperatorCode(flatBufferBuilder,
- tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
-
- flatbuffers::Offset<Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template<typename T>
-void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
- tflite::TensorType tensorType,
- int32_t batchSize,
- int32_t timeSize,
- int32_t inputSize,
- int32_t outputSize,
- int32_t numUnits,
- bool hasInputToInputWeights,
- const std::vector<T>& inputToInputWeights,
- const std::vector<T>& inputToForgetWeights,
- const std::vector<T>& inputToCellWeights,
- const std::vector<T>& inputToOutputWeights,
- bool hasRecurrentToInputWeights,
- const std::vector<T>& recurrentToInputWeights,
- const std::vector<T>& recurrentToForgetWeights,
- const std::vector<T>& recurrentToCellWeights,
- const std::vector<T>& recurrentToOutputWeights,
- bool hasCellToInputWeights,
- const std::vector<T>& cellToInputWeights,
- bool hasCellToForgetWeights,
- const std::vector<T>& cellToForgetWeights,
- bool hasCellToOutputWeights,
- const std::vector<T>& cellToOutputWeights,
- bool hasInputGateBias,
- const std::vector<float>& inputGateBias,
- const std::vector<float>& forgetGateBias,
- const std::vector<float>& cellBias,
- const std::vector<float>& outputGateBias,
- bool hasProjectionWeights,
- const std::vector<T>& projectionWeights,
- bool hasProjectionBias,
- const std::vector<float>& projectionBias,
- bool hasInputLayerNormWeights,
- const std::vector<float>& inputLayerNormWeights,
- bool hasForgetLayerNormWeights,
- const std::vector<float>& forgetLayerNormWeights,
- bool hasCellLayerNormWeights,
- const std::vector<float>& cellLayerNormWeights,
- bool hasOutputLayerNormWeights,
- const std::vector<float>& outputLayerNormWeights,
- std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues,
- tflite::ActivationFunctionType activationFunction,
- float clippingThresCell,
- float clippingThresProj,
- bool isTimeMajor,
- float quantScale = 0.1f)
-{
- using namespace tflite;
-
- std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
- batchSize,
- timeSize,
- inputSize,
- outputSize,
- numUnits,
- hasInputToInputWeights,
- inputToInputWeights,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- hasRecurrentToInputWeights,
- recurrentToInputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- hasCellToInputWeights,
- cellToInputWeights,
- hasCellToForgetWeights,
- cellToForgetWeights,
- hasCellToOutputWeights,
- cellToOutputWeights,
- hasInputGateBias,
- inputGateBias,
- forgetGateBias,
- cellBias,
- outputGateBias,
- hasProjectionWeights,
- projectionWeights,
- hasProjectionBias,
- projectionBias,
- hasInputLayerNormWeights,
- inputLayerNormWeights,
- hasForgetLayerNormWeights,
- forgetLayerNormWeights,
- hasCellLayerNormWeights,
- cellLayerNormWeights,
- hasOutputLayerNormWeights,
- outputLayerNormWeights,
- activationFunction,
- clippingThresCell,
- clippingThresProj,
- isTimeMajor,
- quantScale);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelegateInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
-
- // Run inference on both interpreters
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
- auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
- if (tensorType == ::tflite::TensorType_INT8)
- {
- // Allow 2% tolerance for Quantized weights
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
- expectedOutputValues.size(), 2);
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
- expectedOutputValues.size(), 2);
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData,
- expectedOutputValues.size(), 2);
- }
- else
- {
- armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
- expectedOutputValues.size());
- armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
- expectedOutputValues.size());
- armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
- }
-}
-
-} // anonymous namespace \ No newline at end of file
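For reference, the helper above follows one fixed pattern when exercising an operator: build two interpreters from the same FlatBuffer model, attach the ArmNN delegate to one of them, run both, and compare the outputs element-wise. A condensed sketch of that flow, using only calls that appear in the deleted helper and assuming its includes (armnn_delegate.hpp, the TfLite headers, doctest); the model buffer is assumed to come from a builder such as CreateUnidirectionalSequenceLstmTfLiteModel:

void RunModelWithAndWithoutArmnnDelegate(std::vector<char>& modelBuffer,
                                         std::vector<armnn::BackendId>& backends)
{
    using namespace tflite;
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Reference interpreter: plain TfLite kernels only.
    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Interpreter that will be modified to run through the ArmNN delegate.
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate for the requested backends and attach it.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // From here the helpers fill the input tensors on both interpreters,
    // call Invoke() on each, and compare the outputs with armnnDelegate::CompareData.
}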
diff --git a/delegate/src/test/UnpackTest.cpp b/delegate/src/test/UnpackTest.cpp
deleted file mode 100644
index c036f649ef..0000000000
--- a/delegate/src/test/UnpackTest.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "UnpackTestHelper.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-
-#include <doctest/doctest.h>
-
-namespace armnnDelegate
-{
-
-template <typename T>
-void UnpackAxis0Num4Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 4, 1, 6 };
- std::vector<int32_t> expectedOutputShape { 1, 6 };
-
- std::vector<T> inputValues { 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24 };
-
- std::vector<T> expectedOutputValues0 { 1, 2, 3, 4, 5, 6 };
- std::vector<T> expectedOutputValues1 { 7, 8, 9, 10, 11, 12 };
- std::vector<T> expectedOutputValues2 { 13, 14, 15, 16, 17, 18 };
- std::vector<T> expectedOutputValues3 { 19, 20, 21, 22, 23, 24 };
-
- std::vector<std::vector<T>> expectedOutputValues{ expectedOutputValues0,
- expectedOutputValues1,
- expectedOutputValues2,
- expectedOutputValues3 };
-
- UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 0);
-}
-
-template <typename T>
-void UnpackAxis2Num6Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
-{
- std::vector<int32_t> inputShape { 4, 1, 6 };
- std::vector<int32_t> expectedOutputShape { 4, 1 };
-
- std::vector<T> inputValues { 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24 };
-
- std::vector<T> expectedOutputValues0 { 1, 7, 13, 19 };
- std::vector<T> expectedOutputValues1 { 2, 8, 14, 20 };
- std::vector<T> expectedOutputValues2 { 3, 9, 15, 21 };
- std::vector<T> expectedOutputValues3 { 4, 10, 16, 22 };
- std::vector<T> expectedOutputValues4 { 5, 11, 17, 23 };
- std::vector<T> expectedOutputValues5 { 6, 12, 18, 24 };
-
- std::vector<std::vector<T>> expectedOutputValues{ expectedOutputValues0,
- expectedOutputValues1,
- expectedOutputValues2,
- expectedOutputValues3,
- expectedOutputValues4,
- expectedOutputValues5 };
-
- UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
- tensorType,
- backends,
- inputShape,
- expectedOutputShape,
- inputValues,
- expectedOutputValues,
- 2);
-}
-
-TEST_SUITE("Unpack_CpuRefTests")
-{
-
-// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuRef_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_CpuRefTests
-
-TEST_SUITE("Unpack_CpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_CpuAccTests
-
-TEST_SUITE("Unpack_GpuAccTests")
-{
-
-// Fp32
-TEST_CASE ("Unpack_Fp32_Axis0_Num4_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-TEST_CASE ("Unpack_Fp32_Axis2_Num6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
-}
-
-// Uint8
-TEST_CASE ("Unpack_Uint8_Axis0_Num4_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-TEST_CASE ("Unpack_Uint8_Axis2_Num6_GpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
-}
-
-} // End of Unpack_GpuAccTests
-
-// End of Unpack Test Suite
-
-} // namespace armnnDelegate \ No newline at end of file
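The expected values in the tests above follow directly from slicing the {4, 1, 6} input along the chosen axis. A small stand-alone sketch (illustrative only, not part of the deleted test file) that reproduces the four axis-0 slices the CpuRef, CpuAcc and GpuAcc cases check against:

#include <cstdio>
#include <vector>

int main()
{
    const int dim0 = 4, dim1 = 1, dim2 = 6;           // input shape { 4, 1, 6 }
    std::vector<int> input(dim0 * dim1 * dim2);
    for (size_t i = 0; i < input.size(); ++i)
    {
        input[i] = static_cast<int>(i) + 1;           // values 1..24, as in the tests
    }

    const int sliceSize = dim1 * dim2;                // elements per output when unpacking along axis 0
    for (int n = 0; n < dim0; ++n)
    {
        std::printf("expectedOutputValues%d:", n);
        for (int j = 0; j < sliceSize; ++j)
        {
            std::printf(" %d", input[n * sliceSize + j]);
        }
        std::printf("\n");
    }
    return 0;
}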
diff --git a/delegate/src/test/UnpackTestHelper.hpp b/delegate/src/test/UnpackTestHelper.hpp
deleted file mode 100644
index 0e12d72279..0000000000
--- a/delegate/src/test/UnpackTestHelper.hpp
+++ /dev/null
@@ -1,188 +0,0 @@
-//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-#include <string>
-
-namespace
-{
-
-std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperatorCode,
- tflite::TensorType tensorType,
- std::vector<int32_t>& inputTensorShape,
- const std::vector <int32_t>& outputTensorShape,
- const int32_t outputTensorNum,
- unsigned int axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- buffers.push_back(CreateBuffer(flatBufferBuilder));
-
-
- auto quantizationParameters =
- CreateQuantizationParameters(flatBufferBuilder,
- 0,
- 0,
- flatBufferBuilder.CreateVector<float>({ quantScale }),
- flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
- const std::vector<int32_t> operatorInputs{ 0 };
- std::vector<int32_t> operatorOutputs{};
- const std::vector<int> subgraphInputs{ 0 };
- std::vector<int> subgraphOutputs{};
-
- std::vector<flatbuffers::Offset<Tensor>> tensors(outputTensorNum + 1);
-
- // Create input tensor
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
- inputTensorShape.size()),
- tensorType,
- 1,
- flatBufferBuilder.CreateString("input"),
- quantizationParameters);
-
- for (int i = 0; i < outputTensorNum; ++i)
- {
- tensors[i + 1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
- outputTensorShape.size()),
- tensorType,
- (i + 2),
- flatBufferBuilder.CreateString("output" + std::to_string(i)),
- quantizationParameters);
-
- buffers.push_back(CreateBuffer(flatBufferBuilder));
- operatorOutputs.push_back(i + 1);
- subgraphOutputs.push_back(i + 1);
- }
-
- // create operator
- tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_UnpackOptions;
- flatbuffers::Offset<void> operatorBuiltinOptions =
- CreateUnpackOptions(flatBufferBuilder, outputTensorNum, axis).Union();
-
- flatbuffers::Offset <Operator> unpackOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
- operatorBuiltinOptionsType,
- operatorBuiltinOptions);
-
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&unpackOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Unpack Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unpackOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-template <typename T>
-void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
- tflite::TensorType tensorType,
- std::vector<armnn::BackendId>& backends,
- std::vector<int32_t>& inputShape,
- std::vector<int32_t>& expectedOutputShape,
- std::vector<T>& inputValues,
- std::vector<std::vector<T>>& expectedOutputValues,
- unsigned int axis = 0,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
- using namespace tflite;
- std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
- tensorType,
- inputShape,
- expectedOutputShape,
- expectedOutputValues.size(),
- axis,
- quantScale,
- quantOffset);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
-
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
-
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-
- // Run inference on both interpreters
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
- {
- armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- expectedOutputShape,
- expectedOutputValues[i],
- i);
- }
-
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace \ No newline at end of file
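As a usage note, here is a hypothetical test case built on the UnpackTest helper above; the shape, values and test name are invented for illustration and do not appear in the deleted sources. Unpacking a {2, 3} float tensor along axis 1 yields three {2}-shaped outputs, one per column:

// Hypothetical example, assuming the UnpackTest helper and its includes are available.
void UnpackAxis1Num3SketchTest(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> inputShape { 2, 3 };
    std::vector<int32_t> expectedOutputShape { 2 };

    std::vector<float> inputValues { 1, 2, 3,
                                     4, 5, 6 };

    // Unpacking along axis 1 turns each column of the input into one output tensor.
    std::vector<std::vector<float>> expectedOutputValues { { 1, 4 },
                                                           { 2, 5 },
                                                           { 3, 6 } };

    UnpackTest<float>(tflite::BuiltinOperator_UNPACK,
                      tflite::TensorType_FLOAT32,
                      backends,
                      inputShape,
                      expectedOutputShape,
                      inputValues,
                      expectedOutputValues,
                      1);   // axis
}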